path: root/tools/perf/scripts/python/exported-sql-viewer.py
author: Keiji Hayashibara <hayashibara.keiji@socionext.com> (2019-06-26 09:41:47 +0900)
committer: Mark Brown <broonie@kernel.org> (2019-06-26 12:30:37 +0100)
commit: e4671df0bfd67d4864de014fa1751d5e2a56c7a6 (patch)
tree: dff190447f929e0aacd0c12ad1e428162ebae015 /tools/perf/scripts/python/exported-sql-viewer.py
parent: 94613d5ae1091a28b33f38e18d96e8d953ac18df (diff)
spi: uniphier: fix timeout error
Timeout errors were silently ignored. This commit adds timeout error handling and modifies the type used for the return value of wait_for_completion_timeout().

Signed-off-by: Keiji Hayashibara <hayashibara.keiji@socionext.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
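The fix follows the usual kernel pattern for wait_for_completion_timeout(), which returns the remaining jiffies as an unsigned long (0 on timeout) rather than a negative error code. Below is a minimal sketch of that pattern, not the actual uniphier driver code; the function name, the completion pointer, and the SSI_TIMEOUT_MS constant are illustrative placeholders.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define SSI_TIMEOUT_MS	2000	/* hypothetical timeout value for the sketch */

/*
 * wait_for_completion_timeout() returns the remaining jiffies
 * (0 on timeout), so the result must be kept in an unsigned long
 * and a zero result turned into -ETIMEDOUT instead of being ignored.
 */
static int wait_transfer_done(struct completion *xfer_done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(xfer_done,
						msecs_to_jiffies(SSI_TIMEOUT_MS));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}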
Diffstat (limited to 'tools/perf/scripts/python/exported-sql-viewer.py')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h252
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c252
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c591
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c517
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c450
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c241
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c307
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c426
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c455
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c317
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c193
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c)27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c1314
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c427
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c665
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c484
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c193
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c349
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c465
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c1482
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c1011
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c708
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c586
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c316
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c409
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c195
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c419
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3824
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c374
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c450
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c1039
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c942
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c184
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c409
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c357
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c138
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c430
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c237
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c113
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c158
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c423
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c593
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c501
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c325
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c147
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h208
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c219
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c300
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c263
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c325
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c511
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h246
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h1964
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.c839
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c228
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c368
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c1296
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c832
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c879
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c924
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c812
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c742
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c1201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c49
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h2925
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm202
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm1136
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c86
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c76
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c204
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c421
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c77
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c76
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c145
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c97
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c151
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c121
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c81
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c92
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c103
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c178
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c100
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c181
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c126
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h18
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c57
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2456
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h109
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c858
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c)32
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c272
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c100
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c146
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c136
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c84
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c229
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c92
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c54
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c178
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile46
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c)28
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c)30
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c233
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c419
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1885
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c3001
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c563
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c225
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c945
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h188
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state_priv.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c154
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c201
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile135
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h401
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c12413
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c)20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h)5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c)516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h)8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c)144
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h)8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c)151
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h135
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h)28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h)39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c)44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c)4676
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h)242
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c)149
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c)33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c)537
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c)11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h)100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c)56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c)11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c)49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c)291
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h161
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c245
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c174
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c410
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c373
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c351
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c2474
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h1491
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h123
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link.h)32
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c81
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c81
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c211
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c143
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c336
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c531
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c243
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c386
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h268
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c204
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c)129
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c2196
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c184
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/Makefile (renamed from drivers/gpu/drm/amd/display/dc/spl/Makefile)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl.c)268
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c)452
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c)1058
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c)232
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h)19
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_debug.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h232
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h1514
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c85
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c118
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c169
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c165
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c287
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c402
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c29
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c156
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c56
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c73
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c122
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h26
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h175
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c42
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h10
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h269
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h125
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h15485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h61940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h26
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h48
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h22
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h39
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h6
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h33
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h74
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h7
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h352
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h9
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h76
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c271
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c1131
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h26
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c105
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c135
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c555
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h557
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c66
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c78
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c38
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c440
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h186
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h378
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h146
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c52
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c60
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c63
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c52
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c94
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c200
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c165
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c1073
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c1238
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h233
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c124
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c77
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c65
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c305
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c107
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h122
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h1
-rw-r--r--drivers/gpu/drm/amd/ras/Makefile34
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c285
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h54
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c182
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h27
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c648
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h83
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c94
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c125
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c190
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h41
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c279
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h110
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile44
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras.h370
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.c672
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.h164
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c379
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h71
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.c522
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.h426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core.c603
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.c315
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.h304
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c1339
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h197
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.c70
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.h43
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h259
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c317
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h93
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.c81
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.h50
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c105
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.c96
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.h46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c123
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.c322
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.h53
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.c750
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.h145
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h231
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.c707
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.h166
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c511
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h314
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c6
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c24
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c13
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c1
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c13
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c1
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c8
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c4
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/Makefile10
-rw-r--r--drivers/gpu/drm/ast/ast_2000.c257
-rw-r--r--drivers/gpu/drm/ast/ast_2100.c480
-rw-r--r--drivers/gpu/drm/ast/ast_2200.c92
-rw-r--r--drivers/gpu/drm/ast/ast_2300.c1463
-rw-r--r--drivers/gpu/drm/ast/ast_2400.c100
-rw-r--r--drivers/gpu/drm/ast/ast_2500.c675
-rw-r--r--drivers/gpu/drm/ast/ast_2600.c116
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c314
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c269
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h207
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c79
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h229
-rw-r--r--drivers/gpu/drm/ast/ast_main.c323
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c803
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2018
-rw-r--r--drivers/gpu/drm/ast/ast_post.h50
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h34
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h247
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.c241
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.h108
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c21
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c15
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c53
-rw-r--r--drivers/gpu/drm/bridge/Kconfig39
-rw-r--r--drivers/gpu/drm/bridge/Makefile6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h59
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c85
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c57
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c446
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c41
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c42
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c286
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c52
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c71
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c14
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c14
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c442
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c113
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c21
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c15
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c8
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c27
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c21
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig11
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c19
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c158
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c18
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c65
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c42
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c50
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c35
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c21
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c19
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c12
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c97
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c106
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c87
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c17
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c18
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c46
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c19
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c12
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c24
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c14
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c22
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c15
-rw-r--r--drivers/gpu/drm/bridge/panel.c37
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c10
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c16
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c445
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c42
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c9
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c35
-rw-r--r--drivers/gpu/drm/bridge/ssd2825.c775
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig15
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-dp.c2097
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c741
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c45
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c20
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c20
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c22
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c84
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c49
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c54
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c (renamed from drivers/gpu/drm/i2c/tda998x_drv.c)62
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c11
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c18
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c157
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c395
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c20
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c15
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c19
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c203
-rw-r--r--drivers/gpu/drm/ci/arm64.config2
-rw-r--r--drivers/gpu/drm/ci/build.sh27
-rw-r--r--drivers/gpu/drm/ci/build.yml128
-rw-r--r--drivers/gpu/drm/ci/check-devicetrees.yml50
-rw-r--r--drivers/gpu/drm/ci/container.yml36
-rwxr-xr-xdrivers/gpu/drm/ci/dt-binding-check.sh19
-rwxr-xr-xdrivers/gpu/drm/ci/dtbs-check.sh22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml250
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh25
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml21
-rwxr-xr-xdrivers/gpu/drm/ci/kunit.sh16
-rw-r--r--drivers/gpu/drm/ci/kunit.yml37
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh103
-rwxr-xr-xdrivers/gpu/drm/ci/setup-llvm-links.sh13
-rw-r--r--drivers/gpu/drm/ci/test.yml136
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt35
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt299
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt113
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt60
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt34
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt31
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt132
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt70
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt545
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c11
-rw-r--r--drivers/gpu/drm/clients/drm_fbdev_client.c37
-rw-r--r--drivers/gpu/drm/clients/drm_log.c47
-rw-r--r--drivers/gpu/drm/display/Kconfig13
-rw-r--r--drivers/gpu/drm/display/Makefile4
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c355
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c3
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c54
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c943
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c198
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c33
-rw-r--r--drivers/gpu/drm/display/drm_dsc_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c7
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_helper.c193
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c65
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c171
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c428
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c286
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c518
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c197
-rw-r--r--drivers/gpu/drm/drm_auth.c65
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c360
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c60
-rw-r--r--drivers/gpu/drm/drm_buddy.c426
-rw-r--r--drivers/gpu/drm/drm_cache.c9
-rw-r--r--drivers/gpu/drm/drm_client.c232
-rw-r--r--drivers/gpu/drm/drm_client_event.c71
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c312
-rw-r--r--drivers/gpu/drm/drm_client_sysrq.c65
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c287
-rw-r--r--drivers/gpu/drm/drm_colorop.c599
-rw-r--r--drivers/gpu/drm/drm_connector.c52
-rw-r--r--drivers/gpu/drm/drm_crtc.c55
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h2
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c166
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c1
-rw-r--r--drivers/gpu/drm/drm_displayid.c58
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h33
-rw-r--r--drivers/gpu/drm/drm_draw.c104
-rw-r--r--drivers/gpu/drm/drm_draw_internal.h2
-rw-r--r--drivers/gpu/drm/drm_drv.c216
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c171
-rw-r--r--drivers/gpu/drm/drm_edid.c355
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c187
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c235
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c22
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c25
-rw-r--r--drivers/gpu/drm/drm_file.c82
-rw-r--r--drivers/gpu/drm/drm_flip_work.c1
-rw-r--r--drivers/gpu/drm/drm_format_helper.c842
-rw-r--r--drivers/gpu/drm/drm_format_internal.h174
-rw-r--r--drivers/gpu/drm/drm_fourcc.c45
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c60
-rw-r--r--drivers/gpu/drm/drm_gem.c271
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c11
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c12
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c59
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c346
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c91
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c1635
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c691
-rw-r--r--drivers/gpu/drm/drm_internal.h31
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_managed.c9
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c20
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c214
-rw-r--r--drivers/gpu/drm/drm_mm.c1
-rw-r--r--drivers/gpu/drm/drm_mode_config.c15
-rw-r--r--drivers/gpu/drm/drm_mode_object.c18
-rw-r--r--drivers/gpu/drm/drm_modes.c11
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c12
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c9
-rw-r--r--drivers/gpu/drm/drm_pagemap.c882
-rw-r--r--drivers/gpu/drm/drm_panel.c268
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c114
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c53
-rw-r--r--drivers/gpu/drm/drm_panic.c206
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs227
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane.c116
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c72
-rw-r--r--drivers/gpu/drm/drm_print.c1
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c47
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c1
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c1
-rw-r--r--drivers/gpu/drm/drm_suballoc.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c48
-rw-r--r--drivers/gpu/drm/drm_sysfs.c47
-rw-r--r--drivers/gpu/drm/drm_vblank.c185
-rw-r--r--drivers/gpu/drm/drm_vblank_helper.c176
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c6
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c188
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c29
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c43
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c32
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c45
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c3
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c68
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c14
-rw-r--r--drivers/gpu/drm/gma500/gem.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c6
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c37
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c33
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c116
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h14
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c16
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h36
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h130
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c74
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c95
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c14
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c5
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm.h4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c3
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c218
-rw-r--r--drivers/gpu/drm/i2c/Kconfig36
-rw-r--r--drivers/gpu/drm/i2c/Makefile10
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c507
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug2
-rw-r--r--drivers/gpu/drm/i915/Makefile39
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c7
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c4
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c215
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.h14
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c234
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h7
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c105
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.c6
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c556
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h16
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1312
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm_regs.h257
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c222
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c411
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c540
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h178
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c980
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf_regs.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c941
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h68
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg_regs.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c439
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.c99
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_regs.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c183
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c192
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c279
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c642
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.c295
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c1846
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c137
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h135
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2864
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h97
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h96
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c348
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c201
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c1756
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_jiffies.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c314
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c284
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h2934
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h152
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h279
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c65
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c679
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h493
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2136
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c264
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c831
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c261
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c582
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1556
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h110
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c54
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c431
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c254
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h197
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c76
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c511
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h39
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c528
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c492
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c525
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c233
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c472
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c184
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h42
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c103
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c447
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c126
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c181
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c356
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c710
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c678
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c286
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c2327
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c338
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c134
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h56
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c300
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c314
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c185
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit_regs.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c150
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.c)778
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.h)31
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c119
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c274
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c94
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c1364
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c328
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c366
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c119
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c256
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c600
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_tdf.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c294
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c701
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h121
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c176
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h13
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.c157
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.h46
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c689
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h37
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c1019
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h150
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c1658
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h69
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h52
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.c88
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.h38
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c264
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h6
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c206
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.c50
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.h156
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c71
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c103
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c137
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c173
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c108
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c88
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c57
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c89
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h139
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c48
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c117
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c60
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c81
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c17
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c59
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c8
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README6
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c8
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c117
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c80
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c40
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c88
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c140
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c76
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h23
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c128
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c293
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_active.c23
-rw-r--r--drivers/gpu/drm/i915/i915_active.h1
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c29
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c327
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h100
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c162
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gtt_view_types.h59
-rw-r--r--drivers/gpu/drm/i915/i915_iosf_mbi.h6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c432
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h4
-rw-r--r--drivers/gpu/drm/i915/i915_jiffies.h16
-rw-r--r--drivers/gpu/drm/i915/i915_list_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.c18
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.h19
-rw-r--r--drivers/gpu/drm/i915/i915_module.c5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c101
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c135
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h13
-rw-r--r--drivers/gpu/drm/i915/i915_ptr_util.h66
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3354
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h126
-rw-r--r--drivers/gpu/drm/i915/i915_request.c14
-rw-r--r--drivers/gpu/drm/i915/i915_request.h9
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c13
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c11
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.c36
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h28
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c31
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h253
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c56
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h33
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h52
-rw-r--r--drivers/gpu/drm/i915/i915_wait_util.h119
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c45
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.c44
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c9
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h5
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c274
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c16
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c32
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h15
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c83
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c94
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/intel_step.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c44
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h15
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.h49
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c9
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h11
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c12
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c29
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c35
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c205
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h34
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c6
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c290
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h89
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.c7
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.h6
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.c (renamed from drivers/gpu/drm/i915/vlv_sideband.c)180
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.h37
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/i915/vlv_sideband_reg.h)6
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h125
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c6
-rw-r--r--drivers/gpu/drm/imagination/Kconfig4
-rw-r--r--drivers/gpu/drm/imagination/Makefile4
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c137
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h65
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c37
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c68
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c34
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c36
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c21
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c318
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c49
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c150
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h3
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c31
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c20
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c26
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c27
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c4
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c4
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c6
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c34
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h6
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig2
-rw-r--r--drivers/gpu/drm/loongson/lsdc_benchmark.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_debugfs.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c32
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_irq.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a1000.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a2000.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_pixpll.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c6
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c4
-rw-r--r--drivers/gpu/drm/mcde/mcde_clk_div.c13
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c10
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig28
-rw-r--r--drivers/gpu/drm/mediatek/Makefile11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c70
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c34
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c80
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c77
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c447
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c57
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c84
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c818
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.c456
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.h198
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c396
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h263
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_v2.c1521
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c18
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c47
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c1
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c1
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c226
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h13
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c205
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c84
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig39
-rw-r--r--drivers/gpu/drm/msm/Makefile30
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c75
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c18
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c67
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c26
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c120
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c614
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c842
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h60
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c860
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h39
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c85
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h117
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c221
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h43
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c61
-rw-r--r--drivers/gpu/drm/msm/adreno/a8xx_gpu.c1201
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c103
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h435
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h340
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h506
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c229
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h142
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h86
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h494
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h541
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h61
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h65
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h254
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h408
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h64
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c175
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c476
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c529
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c161
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h126
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c206
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c224
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c142
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c860
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c497
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c90
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c88
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c36
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c27
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c2
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.h13
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c512
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h37
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c216
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1341
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h128
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c768
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c274
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c49
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c136
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c366
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.c10
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c34
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c266
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c44
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c67
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c24
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c56
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c22
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c33
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c280
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c262
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h59
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c199
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c393
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c93
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c18
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c12
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c17
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c136
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c385
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h105
-rw-r--r--drivers/gpu/drm/msm/msm_dsc_helper.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c51
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c13
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c603
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h302
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c71
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c413
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c1609
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c245
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h181
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h26
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c391
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c99
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h58
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c346
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h28
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h44
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c62
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c27
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c102
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.c172
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.h37
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml6143
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml158
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml429
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml281
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml600
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml216
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml1030
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml121
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml299
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_common.xml12
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml704
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml37
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml2
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdss.xml11
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py212
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c5
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c23
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c (renamed from drivers/gpu/drm/i2c/ch7006_drv.c)32
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c (renamed from drivers/gpu/drm/i2c/ch7006_mode.c)8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h (renamed from drivers/gpu/drm/i2c/ch7006_priv.h)11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c (renamed from drivers/gpu/drm/i2c/sil164_drv.c)35
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c (renamed from drivers/gpu/drm/drm_encoder_slave.c)95
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c53
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c242
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c98
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h87
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h220
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h136
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c89
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c215
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c332
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c63
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c113
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c159
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/fw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c320
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c)396
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c)418
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c)1372
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c698
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c267
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig16
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs71
-rw-r--r--drivers/gpu/drm/nova/file.rs69
-rw-r--r--drivers/gpu/drm/nova/gem.rs47
-rw-r--r--drivers/gpu/drm/nova/nova.rs17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c10
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c34
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c34
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig193
-rw-r--r--drivers/gpu/drm/panel/Makefile16
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c78
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c113
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c10
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c392
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112b.c430
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c594
-rw-r--r--drivers/gpu/drm/panel/panel-hydis-hv101hd1.c188
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c11
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c1765
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c81
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c11
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c11
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c43
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c204
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c11
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c13
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c14
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c11
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-ld070wx3.c184
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c14
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c11
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c20
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c418
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c210
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c14
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1688
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c10
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c18
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c12
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c493
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c10
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c19
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c250
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c385
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c102
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c981
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c204
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c225
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c70
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c508
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c133
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c13
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c17
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c118
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c11
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c134
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c81
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-tddi.c277
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c190
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c326
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c442
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c11
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c188
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c142
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h58
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c389
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c195
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c362
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h38
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c259
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c36
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h36
-rw-r--r--drivers/gpu/drm/panthor/Makefile2
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c64
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c81
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h127
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c192
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c163
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h38
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c268
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h96
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c284
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c61
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.c224
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.h56
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c402
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.c549
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.h23
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h188
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c538
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h9
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c14
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c35
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c589
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c215
-rw-r--r--drivers/gpu/drm/radeon/r200.c34
-rw-r--r--drivers/gpu/drm/radeon/r300.c69
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c449
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c116
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c8
-rw-r--r--drivers/gpu/drm/radeon/rs400.c18
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c1
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c20
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c20
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c295
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h342
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig17
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c14
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c44
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c123
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c371
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h56
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig12
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c245
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c294
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h8
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c163
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c21
-rw-r--r--drivers/gpu/drm/rockchip/dw_dp-rockchip.c150
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c104
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c444
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c487
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h349
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c316
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c1720
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h313
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c143
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h21
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c1916
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c95
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h107
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c141
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c8
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h91
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c435
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c370
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h224
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c563
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig42
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1083
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)1
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c10
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c93
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c13
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c13
-rw-r--r--drivers/gpu/drm/sti/Makefile2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c1
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c19
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c45
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c57
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c45
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c17
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c14
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c21
-rw-r--r--drivers/gpu/drm/stm/drv.c13
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c14
-rw-r--r--drivers/gpu/drm/stm/ltdc.c202
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/stm/lvds.c35
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h16
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c378
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h86
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c182
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c246
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h6
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h218
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c608
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c104
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c390
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c)462
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c)280
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c652
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c24
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c11
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.h3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c71
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c15
-rw-r--r--drivers/gpu/drm/tegra/gem.c11
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c7
-rw-r--r--drivers/gpu/drm/tegra/hub.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c6
-rw-r--r--drivers/gpu/drm/tegra/nvjpg.c330
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c11
-rw-r--r--drivers/gpu/drm/tegra/uapi.c7
-rw-r--r--drivers/gpu/drm/tests/.kunitconfig2
-rw-r--r--drivers/gpu/drm/tests/Makefile7
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c379
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c521
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c135
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c12
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c10
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c60
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c17
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c22
-rw-r--r--drivers/gpu/drm/tests/drm_fixp_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c334
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c31
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c1167
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h374
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c128
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c26
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c8
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c53
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c707
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h29
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h107
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c35
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h9
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c13
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c619
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h2
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig86
-rw-r--r--drivers/gpu/drm/tiny/Makefile6
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c834
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c4
-rw-r--r--drivers/gpu/drm/tiny/bochs.c38
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c157
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c47
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c1
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c1
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c6
-rw-r--r--drivers/gpu/drm/tiny/pixpaper.c1166
-rw-r--r--drivers/gpu/drm/tiny/repaper.c21
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c27
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c52
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c135
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c33
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c25
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h7
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c4
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c24
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c182
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c168
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h (renamed from drivers/gpu/drm/amd/amdkfd/kfd_pasid.c)70
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c387
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c786
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool_internal.h25
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c113
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c95
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tyr/Kconfig19
-rw-r--r--drivers/gpu/drm/tyr/Makefile3
-rw-r--r--drivers/gpu/drm/tyr/driver.rs205
-rw-r--r--drivers/gpu/drm/tyr/file.rs56
-rw-r--r--drivers/gpu/drm/tyr/gem.rs18
-rw-r--r--drivers/gpu/drm/tyr/gpu.rs219
-rw-r--r--drivers/gpu/drm/tyr/regs.rs108
-rw-r--r--drivers/gpu/drm/tyr/tyr.rs22
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h20
-rw-r--r--drivers/gpu/drm/udl/udl_edid.c1
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c127
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c113
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h66
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c41
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c159
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h26
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c207
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c5
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c37
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c9
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig2
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c188
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c33
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h27
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c47
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c186
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c275
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c1
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c30
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c51
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c25
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c131
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c45
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c157
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c9
-rw-r--r--drivers/gpu/drm/vkms/Kconfig16
-rw-r--r--drivers/gpu/drm/vkms/Makefile8
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile8
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_color_test.c414
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c1029
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c279
-rw-r--r--drivers/gpu/drm/vkms/vkms_colorop.c120
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c138
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.h28
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c649
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h489
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.c843
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.h8
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c96
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h35
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c123
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c114
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h101
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c636
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.c811
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c185
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c51
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c40
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c858
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h82
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c62
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h58
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c51
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c884
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c75
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c106
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c10
-rw-r--r--drivers/gpu/drm/xe/Kconfig57
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug32
-rw-r--r--drivers/gpu/drm/xe/Kconfig.profile1
-rw-r--r--drivers/gpu/drm/xe/Makefile94
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h41
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h40
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h8
-rw-r--r--drivers/gpu/drm/xe/abi/guc_capture_abi.h2
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h17
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h62
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h117
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h82
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h5
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h76
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h)0
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h)2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c23
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c26
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c62
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c12
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c85
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c349
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h19
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c74
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.h11
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c9
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c21
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c211
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c143
-rw-r--r--drivers/gpu/drm/xe/display/xe_panic.c102
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c59
-rw-r--r--drivers/gpu/drm/xe/display/xe_stolen.c123
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c10
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h7
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mfx_commands.h28
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h25
-rw-r--r--drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h29
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h62
-rw-r--r--drivers/gpu/drm/xe/regs/xe_hw_error_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h18
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h11
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h18
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pxp_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h30
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c71
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c56
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c208
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c333
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c776
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c121
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c331
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c34
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c8
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c90
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h4
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c39
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h5
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c1827
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h195
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c392
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c1291
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h47
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c208
-rw-r--r--drivers/gpu/drm/xe/xe_dep_job_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.c143
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c147
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h2
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c645
-rw-r--r--drivers/gpu/drm/xe/xe_device.h64
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c249
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h305
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules5
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c126
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c14
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c989
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h25
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c77
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c430
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h34
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h57
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c52
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c8
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c59
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c459
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h32
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c31
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h37
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c45
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c77
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c449
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h55
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c87
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c318
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c145
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c90
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c144
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c693
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c189
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c685
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h17
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c757
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c440
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c1022
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h48
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c50
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c219
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_printk.h7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c747
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c64
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c357
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c548
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h39
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c129
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h23
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h131
-rw-r--r--drivers/gpu/drm/xe/xe_guard.h119
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c651
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c257
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c207
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h49
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c139
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c514
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c147
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c521
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h102
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h57
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.c95
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c659
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c829
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h9
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h18
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c47
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c253
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c12
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c160
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c109
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c58
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c780
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.h4
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c372
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h68
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c468
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h8
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c97
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c791
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h35
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_map.h4
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c59
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c1127
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h55
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c158
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h8
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c49
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.h8
-rw-r--r--drivers/gpu/drm/xe/xe_module.c89
-rw-r--r--drivers/gpu/drm/xe/xe_module.h1
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c170
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c686
-rw-r--r--drivers/gpu/drm/xe/xe_oa.h4
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c16
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.c444
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault_types.h136
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c189
-rw-r--r--drivers/gpu/drm/xe/xe_pat.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c725
-rw-r--r--drivers/gpu/drm/xe/xe_pci.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c156
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h64
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c85
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h46
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c298
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h21
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c600
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.h18
-rw-r--r--drivers/gpu/drm/xe/xe_pmu_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c11
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1049
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c949
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.h35
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.c129
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c602
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.h22
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_types.h135
-rw-r--r--drivers/gpu/drm/xe/xe_query.c141
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h4
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c14
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h123
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c102
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c77
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h47
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c76
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h46
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c42
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h13
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c306
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.c520
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c181
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.c279
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.h22
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c395
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h18
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h27
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.c365
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.c438
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c647
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h70
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_printk.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c174
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h8
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c480
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h35
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h47
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vfio.c80
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c377
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h18
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c1537
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h389
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c94
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c94
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c142
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c253
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h15
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_printk.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c350
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c12
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c285
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h34
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h93
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h35
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.h49
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c89
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c9
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c37
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c110
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h3
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c102
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c177
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c322
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c2227
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h164
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h227
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c292
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h12
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c24
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c295
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h30
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules54
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c4
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c247
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c9
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c3
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c10
3830 files changed, 375570 insertions, 137619 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6f1cf235073d..7e6bc0b3a589 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
	  (/dev/agpgart) support if it is available for your platform.

+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
 if DRM

config DRM_MIPI_DBI
@@ -37,68 +42,10 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
+ select FB_CORE if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
@@ -187,7 +134,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
@@ -241,28 +188,12 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -277,6 +208,15 @@ config DRM_GPUVM
GPU-VM representation providing helpers to manage a GPUs virtual
address space
+config DRM_GPUSVM
+ tristate
+ depends on DRM && DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ GPU-SVM representation providing helpers to manage a GPUs shared
+ virtual memory
+
config DRM_BUDDY
tristate
depends on DRM
@@ -293,6 +233,8 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Helpers for ttm-based gem objects
@@ -300,6 +242,8 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
@@ -307,6 +251,8 @@ config DRM_GEM_DMA_HELPER
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM shmem helper functions
@@ -319,7 +265,7 @@ config DRM_SCHED
tristate
depends on DRM
-source "drivers/gpu/drm/i2c/Kconfig"
+source "drivers/gpu/drm/sysfb/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -329,6 +275,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -434,19 +382,25 @@ source "drivers/gpu/drm/mcde/Kconfig"
source "drivers/gpu/drm/tidss/Kconfig"
+source "drivers/gpu/drm/adp/Kconfig"
+
source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV_VMBUS
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -458,9 +412,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -473,20 +424,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..05dc43c0b8c5
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,117 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
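The KUnit options above build in-kernel unit tests that are run with tools/testing/kunit/kunit.py or by booting a test kernel. As a rough sketch of the shape such a test module takes (the suite and case names below are invented for illustration and are not taken from the DRM, TTM or scheduler test sources):

#include <kunit/test.h>

static void drm_example_alignment_test(struct kunit *test)
{
	/* trivial assertions, standing in for real DRM checks */
	KUNIT_EXPECT_EQ(test, 64, 64);
	KUNIT_EXPECT_TRUE(test, (4096 % 64) == 0);
}

static struct kunit_case drm_example_test_cases[] = {
	KUNIT_CASE(drm_example_alignment_test),
	{}
};

static struct kunit_suite drm_example_test_suite = {
	.name = "drm-example",
	.test_cases = drm_example_test_cases,
};
kunit_test_suite(drm_example_test_suite);

With CONFIG_KUNIT_ALL_TESTS enabled such suites are selected automatically, which is what the "default KUNIT_ALL_TESTS" lines above express.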
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 19fb370fbc56..0e1c668b46d2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,7 +6,7 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
# Unconditionally enable W=1 warnings locally
-# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+# --- begin copy-paste W=1 warnings from scripts/Makefile.warn
subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
subdir-ccflags-y += $(call cc-option, -Wrestrict)
subdir-ccflags-y += -Wmissing-format-attribute
@@ -41,6 +41,7 @@ drm-y := \
drm_bridge.o \
drm_cache.o \
drm_color_mgmt.o \
+ drm_colorop.o \
drm_connector.o \
drm_crtc.o \
drm_displayid.o \
@@ -76,7 +77,8 @@ drm-y := \
drm-$(CONFIG_DRM_CLIENT) += \
drm_client.o \
drm_client_event.o \
- drm_client_modeset.o
+ drm_client_modeset.o \
+ drm_client_sysrq.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -105,6 +107,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
+drm_gpusvm_helper-y := \
+ drm_gpusvm.o\
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
+
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
drm_dma_helper-y := drm_gem_dma_helper.o
@@ -133,9 +140,9 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
- drm_encoder_slave.o \
drm_flip_work.o \
drm_format_helper.o \
drm_gem_atomic_helper.o \
@@ -145,7 +152,8 @@ drm_kms_helper-y := \
drm_plane_helper.o \
drm_probe_helper.o \
drm_self_refresh_helper.o \
- drm_simple_kms_helper.o
+ drm_simple_kms_helper.o \
+ drm_vblank_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -176,6 +184,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -198,28 +207,49 @@ obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_LOGICVC) += logicvc/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
-obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_ADP) += adp/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
obj-$(CONFIG_DRM_POWERVR) += imagination/
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \
+ $(shell cd $(src) && find display lib -name '*.h')
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
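The hdrtest rule above compiles each header on its own with -fsyntax-only, includes it twice to catch a missing include guard, and runs kernel-doc -none over it. A hedged sketch of a header that would pass those checks (drm_example.h and its contents are hypothetical, not an existing DRM header):

/* SPDX-License-Identifier: MIT */
#ifndef __DRM_EXAMPLE_H__
#define __DRM_EXAMPLE_H__

#include <linux/types.h>

/**
 * struct drm_example - tiny self-contained example
 * @count: number of items tracked by the example
 *
 * Self-contained: the header includes what it needs instead of relying on
 * whoever includes it, and the guard makes the second inclusion done by the
 * hdrtest rule a no-op.
 */
struct drm_example {
	u32 count;
};

#endif /* __DRM_EXAMPLE_H__ */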
diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig
new file mode 100644
index 000000000000..9fcc27eb200d
--- /dev/null
+++ b/drivers/gpu/drm/adp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_ADP
+ tristate "DRM Support for pre-DCP Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_PANEL_BRIDGE
+ select VIDEOMODE_HELPERS
+ select DRM_MIPI_DSI
+ help
+ Choose this option if you have an Apple Arm laptop with a touchbar.
+
+ If M is selected, this module will be called adpdrm.
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile
new file mode 100644
index 000000000000..8e7b618edd35
--- /dev/null
+++ b/drivers/gpu/drm/adp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+adpdrm-y := adp_drv.o
+adpdrm-mipi-y := adp-mipi.o
+obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
new file mode 100644
index 000000000000..cba7d32150a9
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define DSI_GEN_HDR 0x6c
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct adp_mipi_drv_private {
+ struct mipi_dsi_host dsi;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ void __iomem *mipi;
+};
+
+#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi)
+
+static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ writel(hdr_val, adp->mipi + DSI_GEN_HDR);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_write(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_packet *packet)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+ __le32 word;
+ u32 val;
+
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word));
+}
+
+static int adp_dsi_read(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_msg *msg)
+{
+ int i, j, ret, len = msg->rx_len;
+ u8 *buf = msg->rx_buf;
+ u32 val;
+
+ /* Wait end of the read operation */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_RD_CMD_BUSY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Timeout during read operation\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ /* Read fifo must not be empty before all bytes are read */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_R_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Read payload FIFO is empty\n");
+ return ret;
+ }
+
+ val = readl(adp->mipi + DSI_GEN_PLD_DATA);
+ for (j = 0; j < 4 && j + i < len; j++)
+ buf[i + j] = val >> (8 * j);
+ }
+
+ return ret;
+}
+
+static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = adp_dsi_write(adp, &packet);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = adp_dsi_read(adp, msg);
+ if (ret)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static int adp_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void adp_dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops adp_dsi_component_ops = {
+ .bind = adp_dsi_bind,
+ .unbind = adp_dsi_unbind,
+};
+
+static int adp_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct drm_bridge *next;
+ int ret;
+
+ next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
+ adp->next_bridge = next;
+
+ drm_bridge_add(&adp->bridge);
+
+ ret = component_add(host->dev, &adp_dsi_component_ops);
+ if (ret) {
+ pr_err("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&adp->bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+
+ component_del(host->dev, &adp_dsi_component_ops);
+ drm_bridge_remove(&adp->bridge);
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
+ .transfer = adp_dsi_host_transfer,
+ .attach = adp_dsi_host_attach,
+ .detach = adp_dsi_host_detach,
+};
+
+static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct adp_mipi_drv_private *adp =
+ container_of(bridge, struct adp_mipi_drv_private, bridge);
+
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
+ .attach = adp_dsi_bridge_attach,
+};
+
+static int adp_mipi_probe(struct platform_device *pdev)
+{
+ struct adp_mipi_drv_private *adp;
+
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ adp->mipi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adp->mipi)) {
+ dev_err(&pdev->dev, "failed to map mipi mmio");
+ return PTR_ERR(adp->mipi);
+ }
+
+ adp->dsi.dev = &pdev->dev;
+ adp->dsi.ops = &adp_dsi_host_ops;
+ adp->bridge.of_node = pdev->dev.of_node;
+ adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
+ dev_set_drvdata(&pdev->dev, adp);
+ return mipi_dsi_host_register(&adp->dsi);
+}
+
+static void adp_mipi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adp_mipi_drv_private *adp = dev_get_drvdata(dev);
+
+ mipi_dsi_host_unregister(&adp->dsi);
+}
+
+static const struct of_device_id adp_mipi_of_match[] = {
+ { .compatible = "apple,h7-display-pipe-mipi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_mipi_of_match);
+
+static struct platform_driver adp_mipi_platform_driver = {
+ .driver = {
+ .name = "adp-mipi",
+ .of_match_table = adp_mipi_of_match,
+ },
+ .probe = adp_mipi_probe,
+ .remove = adp_mipi_remove,
+};
+
+module_platform_driver(adp_mipi_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe MIPI driver");
+MODULE_LICENSE("GPL");
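adp_dsi_write() above feeds an arbitrary-length payload into a 32-bit FIFO register four bytes at a time, zero-padding the final word; the driver additionally goes through __le32/le32_to_cpu() so the byte order on the wire does not depend on host endianness. A standalone sketch of just the packing loop, with a callback standing in for the writel() to DSI_GEN_PLD_DATA (plain C, assumes a little-endian host, names invented):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void pack_payload(const uint8_t *buf, size_t len,
			 void (*push_word)(uint32_t word, void *ctx), void *ctx)
{
	while (len) {
		uint32_t word = 0;
		size_t chunk = len < sizeof(word) ? len : sizeof(word);

		memcpy(&word, buf, chunk);	/* unused tail bytes stay zero */
		push_word(word, ctx);		/* writel(..., DSI_GEN_PLD_DATA) in the driver */
		buf += chunk;
		len -= chunk;
	}
}

The header word built from packet->header at the end of adp_dsi_write() follows the same pattern, but only after the command FIFO reports space via DSI_CMD_PKT_STATUS.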
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
new file mode 100644
index 000000000000..4554cf75565e
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#define ADP_INT_STATUS 0x34
+#define ADP_INT_STATUS_INT_MASK 0x7
+#define ADP_INT_STATUS_VBLANK 0x1
+#define ADP_CTRL 0x100
+#define ADP_CTRL_VBLANK_ON 0x12
+#define ADP_CTRL_FIFO_ON 0x601
+#define ADP_SCREEN_SIZE 0x0c
+#define ADP_SCREEN_HSIZE GENMASK(15, 0)
+#define ADP_SCREEN_VSIZE GENMASK(31, 16)
+
+#define ADBE_FIFO 0x10c0
+#define ADBE_FIFO_SYNC 0xc0000000
+
+#define ADBE_BLEND_BYPASS 0x2020
+#define ADBE_BLEND_EN1 0x2028
+#define ADBE_BLEND_EN2 0x2074
+#define ADBE_BLEND_EN3 0x202c
+#define ADBE_BLEND_EN4 0x2034
+#define ADBE_MASK_BUF 0x2200
+
+#define ADBE_SRC_START 0x4040
+#define ADBE_SRC_SIZE 0x4048
+#define ADBE_DST_START 0x4050
+#define ADBE_DST_SIZE 0x4054
+#define ADBE_STRIDE 0x4038
+#define ADBE_FB_BASE 0x4030
+
+#define ADBE_LAYER_EN1 0x4020
+#define ADBE_LAYER_EN2 0x4068
+#define ADBE_LAYER_EN3 0x40b4
+#define ADBE_LAYER_EN4 0x40f4
+#define ADBE_SCALE_CTL 0x40ac
+#define ADBE_SCALE_CTL_BYPASS 0x100000
+
+#define ADBE_LAYER_CTL 0x1038
+#define ADBE_LAYER_CTL_ENABLE 0x10000
+
+#define ADBE_PIX_FMT 0x402c
+#define ADBE_PIX_FMT_XRGB32 0x53e4001
+
+static int adp_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The modesetting driver does not check the non-desktop connector
+ * property and keeps the device open and locked. If the touchbar daemon
+ * opens the device first, modesetting breaks the whole X session.
+ * Simply refuse to open the device for X11 server processes as
+ * workaround.
+ */
+ if (current->comm[0] == 'X')
+ return -EBUSY;
+
+ return drm_open(inode, filp);
+}
+
+static const struct file_operations adp_fops = {
+ .owner = THIS_MODULE,
+ .open = adp_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+};
+
+static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ args->height = ALIGN(args->height, 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver adp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &adp_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create),
+ .name = "adp",
+ .desc = "Apple Display Pipe DRM Driver",
+ .major = 0,
+ .minor = 1,
+};
+
+struct adp_drv_private {
+ struct drm_device drm;
+ struct drm_crtc crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ void __iomem *be;
+ void __iomem *fe;
+ u32 *mask_buf;
+ u64 mask_buf_size;
+ dma_addr_t mask_iova;
+ int be_irq;
+ int fe_irq;
+ struct drm_pending_vblank_event *event;
+};
+
+#define to_adp(x) container_of(x, struct adp_drv_private, drm)
+#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc)
+
+static int adp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void adp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp;
+ struct drm_rect src_rect;
+ struct drm_gem_dma_object *obj;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ u32 src_pos, src_size, dst_pos, dst_size;
+
+ if (!plane || !new_state)
+ return;
+
+ fb = new_state->fb;
+ if (!fb)
+ return;
+ adp = to_adp(plane->dev);
+
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+ src_pos = src_rect.x1 << 16 | src_rect.y1;
+ dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1;
+ src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect);
+ dst_size = drm_rect_width(&new_state->dst) << 16 |
+ drm_rect_height(&new_state->dst);
+ writel(src_pos, adp->be + ADBE_SRC_START);
+ writel(src_size, adp->be + ADBE_SRC_SIZE);
+ writel(dst_pos, adp->be + ADBE_DST_START);
+ writel(dst_size, adp->be + ADBE_DST_SIZE);
+ writel(fb->pitches[0], adp->be + ADBE_STRIDE);
+ obj = drm_fb_dma_get_gem_obj(fb, 0);
+ if (obj)
+ writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE);
+
+ writel(BIT(0), adp->be + ADBE_LAYER_EN1);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN2);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN3);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL);
+ writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL);
+ writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT);
+}
+
+static void adp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = to_adp(plane->dev);
+
+ writel(0x0, adp->be + ADBE_LAYER_EN1);
+ writel(0x0, adp->be + ADBE_LAYER_EN2);
+ writel(0x0, adp->be + ADBE_LAYER_EN3);
+ writel(0x0, adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL);
+}
+
+static const struct drm_plane_helper_funcs adp_plane_helper_funcs = {
+ .atomic_check = adp_plane_atomic_check,
+ .atomic_update = adp_plane_atomic_update,
+ .atomic_disable = adp_plane_atomic_disable,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS
+};
+
+static const struct drm_plane_funcs adp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static const u32 plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+#define ALL_CRTCS 1
+
+static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *plane;
+
+ plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0,
+ ALL_CRTCS, &adp_plane_funcs,
+ plane_formats, ARRAY_SIZE(plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
+ if (IS_ERR(plane)) {
+ drm_err(drm, "failed to allocate plane");
+ return plane;
+ }
+
+ drm_plane_helper_add(plane, &adp_plane_helper_funcs);
+ return plane;
+}
+
+static void adp_enable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+}
+
+static int adp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_enable_vblank(adp);
+
+ return 0;
+}
+
+static void adp_disable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+}
+
+static void adp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_disable_vblank(adp);
+}
+
+static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+
+ writel(BIT(0), adp->be + ADBE_BLEND_EN2);
+ writel(BIT(4), adp->be + ADBE_BLEND_EN1);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN3);
+ writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
+ writel(0x0, adp->be + ADBE_BLEND_EN2);
+ writel(0x0, adp->be + ADBE_BLEND_EN1);
+ writel(0x0, adp->be + ADBE_BLEND_EN3);
+ writel(0x0, adp->be + ADBE_BLEND_BYPASS);
+ writel(0x0, adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ u32 frame_num = 1;
+ unsigned long flags;
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ u64 new_size = ALIGN(new_state->mode.hdisplay *
+ new_state->mode.vdisplay * 4, PAGE_SIZE);
+
+ if (new_size != adp->mask_buf_size) {
+ if (adp->mask_buf)
+ dma_free_coherent(crtc->dev->dev, adp->mask_buf_size,
+ adp->mask_buf, adp->mask_iova);
+ adp->mask_buf = NULL;
+ if (new_size != 0) {
+ adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size,
+ &adp->mask_iova, GFP_KERNEL);
+ memset(adp->mask_buf, 0xFF, new_size);
+ writel(adp->mask_iova, adp->be + ADBE_MASK_BUF);
+ }
+ adp->mask_buf_size = new_size;
+ }
+ writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
+ //FIXME: use adbe flush interrupt
+ if (crtc->state->event) {
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ adp->event = event;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static const struct drm_crtc_funcs adp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = adp_crtc_enable_vblank,
+ .disable_vblank = adp_crtc_disable_vblank,
+};
+
+
+static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = {
+ .atomic_enable = adp_crtc_atomic_enable,
+ .atomic_disable = adp_crtc_atomic_disable,
+ .atomic_flush = adp_crtc_atomic_flush,
+};
+
+static int adp_setup_crtc(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = adp_plane_new(adp);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary,
+ NULL, &adp_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs);
+ return 0;
+}
+
+static const struct drm_mode_config_funcs adp_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int adp_setup_mode_config(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ int ret;
+ u32 size;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ /*
+ * Query the screen size and restrict the frame buffer size to the screen
+ * size aligned to the next multiple of 64. This is not strictly necessary,
+ * but it serves as a simple check for non-desktop devices:
+ * Xorg's modesetting driver does not care about the connector
+ * "non-desktop" property. The max frame buffer width or height can be
+ * easily checked, and a device can be rejected if the max width/height is
+ * smaller than 120, for example.
+ * Any touchbar daemon is not limited by this small framebuffer size.
+ */
+ size = readl(adp->fe + ADP_SCREEN_SIZE);
+
+ drm->mode_config.min_width = 32;
+ drm->mode_config.min_height = 32;
+ drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64);
+ drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64);
+ drm->mode_config.preferred_depth = 24;
+ drm->mode_config.prefer_shadow = 0;
+ drm->mode_config.funcs = &adp_mode_config_funcs;
+
+ ret = adp_setup_crtc(adp);
+ if (ret) {
+ drm_err(drm, "failed to create crtc");
+ return ret;
+ }
+
+ adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(adp->encoder)) {
+ drm_err(drm, "failed to init encoder");
+ return PTR_ERR(adp->encoder);
+ }
+ adp->encoder->possible_crtcs = ALL_CRTCS;
+
+ ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ drm_err(drm, "failed to init bridge chain");
+ return ret;
+ }
+
+ adp->connector = drm_bridge_connector_init(drm, adp->encoder);
+ if (IS_ERR(adp->connector))
+ return PTR_ERR(adp->connector);
+
+ drm_connector_attach_encoder(adp->connector, adp->encoder);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ drm_err(drm, "failed to initialize vblank");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp)
+{
+ struct device *dev = &pdev->dev;
+
+ adp->be = devm_platform_ioremap_resource_byname(pdev, "be");
+ if (IS_ERR(adp->be)) {
+ dev_err(dev, "failed to map display backend mmio");
+ return PTR_ERR(adp->be);
+ }
+
+ adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(adp->fe)) {
+ dev_err(dev, "failed to map display pipe mmio");
+ return PTR_ERR(adp->fe);
+ }
+
+ adp->be_irq = platform_get_irq_byname(pdev, "be");
+ if (adp->be_irq < 0)
+ return adp->be_irq;
+
+ adp->fe_irq = platform_get_irq_byname(pdev, "fe");
+ if (adp->fe_irq < 0)
+ return adp->fe_irq;
+
+ return 0;
+}
+
+static irqreturn_t adp_fe_irq(int irq, void *arg)
+{
+ struct adp_drv_private *adp = (struct adp_drv_private *)arg;
+ u32 int_status;
+ u32 int_ctl;
+
+ int_status = readl(adp->fe + ADP_INT_STATUS);
+ if (int_status & ADP_INT_STATUS_VBLANK) {
+ drm_crtc_handle_vblank(&adp->crtc);
+ spin_lock(&adp->crtc.dev->event_lock);
+ if (adp->event) {
+ int_ctl = readl(adp->fe + ADP_CTRL);
+ if ((int_ctl & 0xF00) == 0x600) {
+ drm_crtc_send_vblank_event(&adp->crtc, adp->event);
+ adp->event = NULL;
+ drm_crtc_vblank_put(&adp->crtc);
+ }
+ }
+ spin_unlock(&adp->crtc.dev->event_lock);
+ }
+
+ writel(int_status, adp->fe + ADP_INT_STATUS);
+
+
+ return IRQ_HANDLED;
+}
+
+static int adp_drm_bind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+ int err;
+
+ writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
+
+ adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
+ if (IS_ERR(adp->next_bridge)) {
+ dev_err(dev, "failed to find next bridge");
+ return PTR_ERR(adp->next_bridge);
+ }
+
+ err = adp_setup_mode_config(adp);
+ if (err < 0)
+ return err;
+
+ err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp);
+ if (err)
+ return err;
+
+ err = drm_dev_register(&adp->drm, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void adp_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ free_irq(adp->fe_irq, adp);
+}
+
+static const struct component_master_ops adp_master_ops = {
+ .bind = adp_drm_bind,
+ .unbind = adp_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int adp_probe(struct platform_device *pdev)
+{
+ struct device_node *port;
+ struct component_match *match = NULL;
+ struct adp_drv_private *adp;
+ int err;
+
+ adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ dev_set_drvdata(&pdev->dev, &adp->drm);
+
+ err = adp_parse_of(pdev, adp);
+ if (err < 0)
+ return err;
+
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
+ return -ENODEV;
+
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
+
+ return component_master_add_with_match(&pdev->dev, &adp_master_ops, match);
+}
+
+static void adp_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &adp_master_ops);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id adp_of_match[] = {
+ { .compatible = "apple,h7-display-pipe", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_of_match);
+
+static struct platform_driver adp_platform_driver = {
+ .driver = {
+ .name = "adp",
+ .of_match_table = adp_of_match,
+ },
+ .probe = adp_probe,
+ .remove = adp_remove,
+};
+
+module_platform_driver(adp_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
+MODULE_LICENSE("GPL");
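Both the adp_open() workaround and the mode-config comment above revolve around userspace ignoring the connector's "non-desktop" property. A hedged userspace sketch (not part of the patch) of how a compositor can honour that property with libdrm; error handling is trimmed and the function name is invented:

#include <stdbool.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static bool connector_is_non_desktop(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props;
	bool non_desktop = false;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return false;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, "non-desktop"))
			non_desktop = props->prop_values[i] != 0;
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return non_desktop;
}

A compositor that skips connectors where this returns true will leave the touchbar display to its dedicated daemon.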
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..7f515be5185d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
select DRM_CLIENT
@@ -43,14 +43,16 @@ config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
help
- Choose this option if you want to enable experimental support
+ Choose this option if you want to enable support
for SI (Southern Islands) asics.
- SI is already supported in radeon. Experimental support for SI
- in amdgpu will be disabled by default and is still provided by
- radeon. Use module options to override this:
+ SI (Southern Islands) are first generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, SI dedicated GPUs are supported by amdgpu.
- radeon.si_support=0 amdgpu.si_support=1
+ Use module options to override this:
+ To use radeon for SI,
+ radeon.si_support=1 amdgpu.si_support=0
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
@@ -59,16 +61,21 @@ config DRM_AMDGPU_CIK
Choose this option if you want to enable support for CIK (Sea
Islands) asics.
- CIK is already supported in radeon. Support for CIK in amdgpu
- will be disabled by default and is still provided by radeon.
- Use module options to override this:
+ CIK (Sea Islands) are second generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default,
+ CIK dedicated GPUs are supported by amdgpu
+ CIK APUs are supported by radeon
+ Use module options to override this:
+ To use amdgpu for CIK,
radeon.cik_support=0 amdgpu.cik_support=1
+ To use radeon for CIK,
+ radeon.cik_support=1 amdgpu.cik_support=0
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
@@ -77,7 +84,7 @@ config DRM_AMDGPU_USERPTR
config DRM_AMD_ISP
bool "Enable AMD Image Signal Processor IP support"
- depends on DRM_AMDGPU
+ depends on DRM_AMDGPU && ACPI
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 5b21674b07fb..c88760fb52ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
- -I$(FULL_AMD_PATH)/amdkfd
+ -I$(FULL_AMD_PATH)/amdkfd \
+ -I$(FULL_AMD_PATH)/ras/ras_mgr
# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
@@ -65,7 +66,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -76,14 +78,15 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
- uvd_v3_1.o
+ uvd_v3_1.o vce_v1_0.o
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -136,7 +139,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
@@ -173,7 +175,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -252,6 +257,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
@@ -318,4 +325,9 @@ amdgpu-y += \
isp_v4_1_1.o
endif
+AMD_GPU_RAS_PATH := ../ras
+AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras
+include $(AMD_GPU_RAS_FULL_PATH)/Makefile
+amdgpu-y += $(AMD_GPU_RAS_FILES)
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..daa7b23bc775 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,33 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
+ /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-aid */
+ if (adev->aid_mask)
+ ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +215,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -243,6 +260,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
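The aldebaran change above replaces an open-coded "type == GFX || type == SDMA" chain with a mask built by aldebaran_get_ip_block_mask() and a BIT(type) test. A minimal sketch of that set-membership idiom, with illustrative names only:

#include <stdint.h>
#include <stdbool.h>

#define BIT(n)	(1u << (n))

enum ip_type { IP_GFX, IP_SDMA, IP_IH, IP_MAX };

static bool ip_in_mask(enum ip_type type, uint32_t mask)
{
	return (mask & BIT(type)) != 0;
}

/*
 * mask = BIT(IP_GFX) | BIT(IP_SDMA); ip_in_mask(IP_IH, mask) is false, and
 * conditionally adding IH later is one "mask |= BIT(IP_IH)" instead of
 * another || in every caller.
 */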
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 69895fccb474..9f9774f58ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -109,9 +110,12 @@
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
+#include "amdgpu_cper.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
@@ -227,7 +231,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -265,8 +269,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -352,7 +358,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -367,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
-#define AMDGPU_MAX_IP_NUM 16
+#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM
struct amdgpu_ip_block_status {
bool valid;
@@ -415,6 +422,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+void amdgpu_bios_release(struct amdgpu_device *adev);
/*
* Clocks
*/
@@ -429,7 +437,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -465,9 +472,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -487,7 +491,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -500,6 +503,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -511,12 +519,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states into memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading a register may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
- volatile uint32_t *wb;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. In terms of CPU address
+ * this value can be accessed directly by using the offset as an index.
+ * For the GPU address, it is necessary to use gpu_addr and the offset.
+ */
+ uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+ * Tracks which writeback slots are already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects read and write of the used field array.
+ */
spinlock_t lock;
};
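The used[] bitmap and lock documented above imply a small slot allocator. A simplified sketch of how getting a slot from such a bitmap typically looks, written only against the fields shown here and not the body of the driver's own writeback helpers:

static int example_wb_get(struct amdgpu_wb *wb, u32 *slot)
{
	unsigned long flags;
	u32 offset;

	spin_lock_irqsave(&wb->lock, flags);
	offset = find_first_zero_bit(wb->used, wb->num_wb);
	if (offset >= wb->num_wb) {
		spin_unlock_irqrestore(&wb->lock, flags);
		return -ENOMEM;	/* no free writeback slot */
	}
	__set_bit(offset, wb->used);
	spin_unlock_irqrestore(&wb->lock, flags);

	*slot = offset;	/* CPU view: wb->wb[offset]; the GPU side derives from gpu_addr */
	return 0;
}

Freeing a slot is the reverse: clear the bit under the same lock so concurrent allocators see a consistent bitmap.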
@@ -550,6 +608,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -567,6 +626,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
AMD_RESET_METHOD_ON_INIT,
@@ -663,7 +723,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -694,6 +754,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
@@ -761,13 +822,25 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
};
-struct ip_discovery_top;
-
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -820,6 +893,12 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
@@ -828,6 +907,15 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
+};
+
/*
* Custom Init levels could be defined for different situations where a full
* initialization of all hardware blocks are not expected. Sample cases are
@@ -852,10 +940,12 @@ struct amdgpu_init_level {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
struct amdgpu_device {
struct device *dev;
@@ -882,8 +972,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct debugfs_blob_wrapper debugfs_discovery_blob;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -973,6 +1062,9 @@ struct amdgpu_device {
u32 log2_max_MBps;
} mm_stats;
+ /* discovery*/
+ struct amdgpu_discovery_info discovery;
+
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
@@ -1060,9 +1152,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1080,6 +1169,19 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
+ /**
+ * @userq_doorbell_xa: Global user queue map (doorbell index → queue)
+ * Key: doorbell_index (unique global identifier for the queue)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_doorbell_xa;
/* df */
struct amdgpu_df df;
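The two xarrays added above are keyed by doorbell index so the EOP interrupt handler can resolve a doorbell back to its user queue. A generic xarray usage sketch of that publish/lookup pattern; the function names are invented, not the driver's:

#include <linux/xarray.h>

static int example_userq_publish(struct xarray *xa, u32 doorbell_index,
				 struct amdgpu_usermode_queue *queue)
{
	/* at queue creation time, process context */
	return xa_err(xa_store(xa, doorbell_index, queue, GFP_KERNEL));
}

static struct amdgpu_usermode_queue *
example_userq_lookup(struct xarray *xa, u32 doorbell_index)
{
	/* e.g. from the EOP interrupt handler; xa_load() is safe under RCU */
	return xa_load(xa, doorbell_index);
}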
@@ -1090,6 +1192,9 @@ struct amdgpu_device {
/* ACA */
struct amdgpu_aca aca;
+ /* CPER */
+ struct amdgpu_cper cper;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1119,6 +1224,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1149,11 +1255,14 @@ struct amdgpu_device {
struct ratelimit_state throttling_logging_rs;
uint32_t ras_hw_enabled;
uint32_t ras_enabled;
+ bool ras_default_ecc_enabled;
bool no_hw_access;
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1164,8 +1273,6 @@ struct amdgpu_device {
struct list_head ras_list;
- struct ip_discovery_top *ip_top;
-
struct amdgpu_reset_domain *reset_domain;
struct mutex benchmark_mutex;
@@ -1186,12 +1293,36 @@ struct amdgpu_device {
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
bool debug_exp_resets;
+ bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
+ bool debug_enable_ce_cs;
- bool enforce_isolation[MAX_XCP];
- /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+ /* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
+ struct amdgpu_isolation {
+ void *owner;
+ struct dma_fence *spearhead;
+ struct amdgpu_sync active;
+ struct amdgpu_sync prev;
+ } isolation[MAX_XCP];
struct amdgpu_init_level *init_lvl;
+
+ /* This flag is used to determine how VRAM allocations are handled for APUs
+ * in KFD: VRAM or GTT.
+ */
+ bool apu_prefer_gtt;
+
+ bool userq_halt_for_enforce_isolation;
+ struct work_struct userq_reset_work;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+ * Must be last --ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
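
The "Must be last" comments above (for kfd here, and for pgmap inside struct amdgpu_kfd_dev later in this series) follow from a layout constraint: struct dev_pagemap ends in a flexible array member, and the compilers the kernel targets only tolerate embedding such a structure when it is the final member of its container, since anything placed after it would overlap the array's storage. A generic, stand-alone illustration of the rule (not driver code):

struct fam_tail {
	int nr;
	int data[];		/* flexible array member, not counted in sizeof */
};

struct container {
	int other_state;
	struct fam_tail tail;	/* allowed only because it is the last member */
};
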
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1225,6 +1356,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1276,7 +1412,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1403,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) \
- ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
-#define amdgpu_asic_invalidate_hdp(adev, r) \
- ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
- ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1446,16 +1578,17 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1470,6 +1603,9 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
@@ -1502,8 +1638,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1554,7 +1690,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1585,19 +1722,24 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
+#endif
+
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1643,4 +1785,24 @@ extern const struct attribute_group amdgpu_flash_attr_group;
void amdgpu_set_init_level(struct amdgpu_device *adev,
enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
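
A hypothetical caller sketch for the inline helper above: PCI_POSSIBLE_ERROR() flags the all-ones value that configuration reads return once a device has dropped off the bus, so recovery paths can bail out before touching MMIO. The caller name is an assumption; only the return convention (0 or -ENODEV) follows from the code above.

/* Sketch of an error-recovery caller, illustration only. */
static int example_try_recover(struct amdgpu_device *adev)
{
	if (amdgpu_device_bus_status_check(adev))
		return -ENODEV;	/* device gone; nothing left to program */

	/* ... safe to continue with register access and reset sequencing ... */
	return 0;
}
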
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 9d6345146495..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -30,16 +30,6 @@
typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
-struct aca_banks {
- int nr_banks;
- struct list_head list;
-};
-
-struct aca_hwip {
- int hwid;
- int mcatype;
-};
-
static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
ACA_BANK_HWID(SMU, 0x01, 0x01),
ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
@@ -86,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -111,7 +102,7 @@ static struct aca_regs_dump {
{"STATUS", ACA_REG_IDX_STATUS},
{"ADDR", ACA_REG_IDX_ADDR},
{"MISC", ACA_REG_IDX_MISC0},
- {"CONFIG", ACA_REG_IDX_CONFG},
+ {"CONFIG", ACA_REG_IDX_CONFIG},
{"IPID", ACA_REG_IDX_IPID},
{"SYND", ACA_REG_IDX_SYND},
{"DESTAT", ACA_REG_IDX_DESTAT},
@@ -125,11 +116,40 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
@@ -168,7 +188,16 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
if (ret)
return ret;
- bank.type = type;
+ bank.smu_err_type = type;
+
+	/*
+	 * Skip unexpected poison consumption: when a UE is injected while
+	 * background workloads are running, poison may be consumed and
+	 * reported by a non-UMC bank, which is not expected here.
+	 */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
aca_smu_bank_dump(adev, i, count, &bank, qctx);
@@ -180,31 +209,14 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
+ /* Parse all deferred errors with UMC aca handle */
+ if (ACA_BANK_ERR_IS_DEFFERED(bank))
+ return handle->hwip == ACA_HWIP_TYPE_UMC;
+
if (!aca_bank_hwip_is_matched(bank, handle->hwip))
return false;
@@ -227,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
@@ -394,6 +407,56 @@ static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type
return ret;
}
+static void aca_banks_generate_cper(struct amdgpu_device *adev,
+ enum aca_smu_type type,
+ struct aca_banks *banks,
+ int count)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int r;
+
+ if (!adev->cper.enabled)
+ return;
+
+ if (!banks || !count) {
+ dev_warn(adev->dev, "fail to generate cper records\n");
+ return;
+ }
+
+ /* UEs must be encoded into separate CPER entries */
+ if (type == ACA_SMU_TYPE_UE) {
+ struct aca_banks de_banks;
+
+ aca_banks_init(&de_banks);
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ r = aca_banks_add_bank(&de_banks, bank);
+ if (r)
+ dev_warn(adev->dev, "fail to add de banks, ret = %d\n", r);
+ } else {
+ if (amdgpu_cper_generate_ue_record(adev, bank))
+ dev_warn(adev->dev, "fail to generate ue cper records\n");
+ }
+ }
+
+ if (!list_empty(&de_banks.list)) {
+ if (amdgpu_cper_generate_ce_records(adev, &de_banks, de_banks.nr_banks))
+ dev_warn(adev->dev, "fail to generate de cper records\n");
+ }
+
+ aca_banks_release(&de_banks);
+ } else {
+		/*
+		 * SMU_TYPE_CE banks are combined into one CPER entry;
+		 * they may contain CEs, DEs, or both.
+		 */
+ if (amdgpu_cper_generate_ce_records(adev, banks, count))
+ dev_warn(adev->dev, "fail to generate ce cper records\n");
+ }
+}
+
static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
@@ -431,6 +494,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
if (ret)
goto err_release_banks;
+ aca_banks_generate_cper(adev, type, &banks, count);
+
err_release_banks:
aca_banks_release(&banks);
@@ -516,6 +581,10 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h
if (ret)
return ret;
+	/* DEs may be contained in CE or UE banks, so log them as well */
+ if (type != ACA_ERROR_TYPE_DEFERRED)
+ aca_log_aca_error(handle, ACA_ERROR_TYPE_DEFERRED, err_data);
+
return aca_log_aca_error(handle, type, err_data);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index f3289d289913..38c88897e1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -76,12 +76,16 @@ struct ras_query_context;
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define ACA_BANK_ERR_IS_DEFFERED(bank) \
+ (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
+ ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
ACA_REG_IDX_ADDR = 2,
ACA_REG_IDX_MISC0 = 3,
- ACA_REG_IDX_CONFG = 4,
+ ACA_REG_IDX_CONFIG = 4,
ACA_REG_IDX_IPID = 5,
ACA_REG_IDX_SYND = 6,
ACA_REG_IDX_DESTAT = 8,
@@ -108,13 +112,20 @@ enum aca_error_type {
};
enum aca_smu_type {
+ ACA_SMU_TYPE_INVALID = -1,
ACA_SMU_TYPE_UE = 0,
ACA_SMU_TYPE_CE,
ACA_SMU_TYPE_COUNT,
};
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
struct aca_bank {
- enum aca_smu_type type;
+ enum aca_error_type aca_err_type;
+ enum aca_smu_type smu_err_type;
u64 regs[ACA_MAX_REGS_COUNT];
};
@@ -123,6 +134,11 @@ struct aca_bank_node {
struct list_head node;
};
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
struct aca_bank_info {
int die_id;
int socket_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index deb0785350e8..381ef205b0df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 3;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL);
if (r)
goto failure;
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
@@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].id = 2;
adev->acp.acp_cell[2].num_resources = 1;
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].id = 3;
adev->acp.acp_cell[3].num_resources = 1;
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL);
if (r)
goto failure;
@@ -579,7 +585,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool acp_is_idle(void *handle)
+static bool acp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b8d4e07d2043..d31460a9e958 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -394,6 +394,10 @@ static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
characteristics.max_input_signal;
atif->backlight_caps.ac_level = characteristics.ac_level;
atif->backlight_caps.dc_level = characteristics.dc_level;
+ atif->backlight_caps.data_points = characteristics.number_of_points;
+ memcpy(atif->backlight_caps.luminance_data,
+ characteristics.data_points,
+ sizeof(atif->backlight_caps.luminance_data));
out:
kfree(info);
return err;
@@ -503,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
@@ -807,18 +810,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* Returns 0 on success,
* otherwise an error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1277,11 +1280,7 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
- caps->caps_valid = atif->backlight_caps.caps_valid;
- caps->min_input_signal = atif->backlight_caps.min_input_signal;
- caps->max_input_signal = atif->backlight_caps.max_input_signal;
- caps->ac_level = atif->backlight_caps.ac_level;
- caps->dc_level = atif->backlight_caps.dc_level;
+ memcpy(caps, &atif->backlight_caps, sizeof(*caps));
}
/**
@@ -1532,23 +1531,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
return true;
#endif /* CONFIG_AMD_PMC */
}
+#endif /* CONFIG_SUSPEND */
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+ { "OMNI5C10" },
+ { }
+};
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
}
-#endif /* CONFIG_SUSPEND */
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
+{
+ struct device *pdev __free(put_device) = NULL;
+ struct acpi_device *acpi_pdev;
+
+ pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+ isp_match_acpi_device_ids);
+ if (!pdev)
+ return -EINVAL;
+
+ acpi_pdev = ACPI_COMPANION(pdev);
+ if (!acpi_pdev)
+ return -ENODEV;
+
+ *dev = acpi_pdev;
+
+ return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
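
A hypothetical consumer sketch for amdgpu_acpi_get_isp4_dev() (only built under CONFIG_DRM_AMD_ISP). The probe function name is an assumption; the contract (0 on success with *dev set to the sensor's ACPI companion, -EINVAL when no matching platform device is enumerated, -ENODEV when it has no ACPI companion) follows from the implementation above.

/* Illustration only: resolve the OMNI5C10 sensor's ACPI companion. */
static int example_isp_probe_sensor(void)
{
	struct acpi_device *sensor;
	int r;

	r = amdgpu_acpi_get_isp4_dev(&sensor);
	if (r)
		return r;

	/* ... query ACPI properties of 'sensor' as needed ... */
	return 0;
}
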
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 2e5732dfd425..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,42 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
+{
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
+{
+ int r = 0;
+
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend_process(adev->kfd.dev);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -368,6 +392,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+ if (!bo || !*bo)
+ return;
+
(void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
@@ -459,7 +486,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
else
mem_info->local_mem_size_private =
KFD_XCP_MEMORY_SIZE(adev, xcp->id);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
mem_info->local_mem_size_private = 0;
} else {
@@ -555,48 +582,6 @@ out_put:
return r;
}
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src)
-{
- struct amdgpu_device *peer_adev = src;
- struct amdgpu_device *adev = dst;
- int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
-
- if (ret < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, ret);
- ret = 0;
- }
- return (uint8_t)ret;
-}
-
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min)
-{
- struct amdgpu_device *adev = dst, *peer_adev;
- int num_links;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
- return 0;
-
- if (src)
- peer_adev = src;
-
- /* num links returns 0 for indirect peers since indirect route is unknown. */
- num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
- if (num_links < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, num_links);
- num_links = 0;
- }
-
- /* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
- return (num_links * 16 * 25000)/BITS_PER_BYTE;
-}
-
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
@@ -681,7 +666,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -715,8 +700,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
@@ -787,12 +773,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
@@ -817,7 +803,7 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
}
do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
return ALIGN_DOWN(tmp, PAGE_SIZE);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
return (ttm_tt_pages_limit() << PAGE_SHIFT);
} else {
return adev->gmc.real_vram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8af67f18500a..8bdfcde2029b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -47,6 +47,7 @@ enum TLB_FLUSH_TYPE {
};
struct amdgpu_device;
+struct kfd_process_device;
struct amdgpu_reset_context;
enum kfd_mem_attachment_type {
@@ -70,7 +71,7 @@ struct kgd_mem {
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
@@ -106,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+ * Must be last --ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -153,8 +156,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -192,7 +197,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
@@ -212,9 +217,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
}
static inline
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- return 0;
}
static inline
@@ -254,11 +258,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags, int8_t *xcp_id);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
@@ -299,14 +298,10 @@ bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint8_t xcp_id);
@@ -420,18 +415,22 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -463,11 +462,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -498,12 +506,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
@@ -512,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 8dfdb18197c4..7e9f7a280c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -189,8 +189,9 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.set_address_watch = kgd_gfx_aldebaran_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
.hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 9abf29b58ac7..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
@@ -415,9 +415,10 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index e2ae714a700f..89a45a9218f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -509,6 +509,17 @@ static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue);
+ uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset);
+ uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset);
+ bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED);
+
+ return is_active ? doorbell_off >> 2 : 0;
+}
+
const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@@ -530,8 +541,8 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings =
kgd_gfx_v9_program_trap_handler_settings,
- .build_grace_period_packet_info =
- kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info =
+ kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.enable_debug_trap = kgd_aldebaran_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
@@ -543,5 +554,6 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
.clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 62176d607bef..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -1021,25 +1021,25 @@ void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannont handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
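
A usage sketch (the wrapper name is assumed) combining the two helpers whose prototypes appear in this series: read back CP_IQ_WAIT_TIME2, let the builder override only the requested fields, and obtain the register offset/value pair for the dequeue-wait-counts packet. The behavioural change versus the old grace-period interface is visible above: a zero sch_wave or que_sleep now leaves that field at its read-back value instead of being clamped to 1.

/* Hypothetical wrapper, illustration of the new calling convention only. */
static void example_build_wait_counts(struct amdgpu_device *adev,
				      uint32_t sch_wave, uint32_t que_sleep,
				      uint32_t inst,
				      uint32_t *reg_offset, uint32_t *reg_data)
{
	uint32_t wait_times = 0;

	kgd_gfx_v10_get_iq_wait_times(adev, &wait_times, inst);
	kgd_gfx_v10_build_dequeue_wait_counts_packet_info(adev, wait_times,
							  sch_wave, que_sleep,
							  reg_offset, reg_data);
	/* reg_offset/reg_data now hold the register and value for the packet. */
}
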
@@ -1084,6 +1084,12 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1109,8 +1115,9 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 9efd2dd4fdd7..a4c607c88178 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -51,9 +51,10 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -65,3 +66,5 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index c718bedda0ca..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -673,7 +673,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
.validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
@@ -682,5 +682,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index a4ba49cb22db..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -800,6 +800,12 @@ static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -824,5 +830,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_address_watch = kgd_gfx_v11_set_address_watch,
.clear_address_watch = kgd_gfx_v11_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v11_hqd_reset
+ .hqd_reset = kgd_gfx_v11_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 0dfe7093bd8a..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -361,6 +361,12 @@ static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.init_interrupts = init_interrupts_v12,
.hqd_dump = hqd_dump_v12,
@@ -374,4 +380,5 @@ const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v12_set_address_watch,
.clear_address_watch = kgd_gfx_v12_clear_address_watch,
+ .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
lower_32_bits(page_table_base));
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index cc66ebb7bae1..088d09cc7a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1077,25 +1077,25 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
adev->gfx.cu_info.max_waves_per_simd;
}
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannot handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
@@ -1223,6 +1223,13 @@ unlock_out:
return queue_addr;
}
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1248,9 +1255,10 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index b6a91a552aa4..704452ca62f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -97,9 +97,10 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -111,3 +112,5 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1e998f972c30..b1c24c8fa686 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -197,7 +197,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -213,19 +213,35 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit)
+ kfd_mem_limit.max_system_mem_limit) {
pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+ if (!no_system_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
- if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
- (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
- kfd_mem_limit.max_ttm_mem_limit) ||
- (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
+ if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) {
ret = -ENOMEM;
goto release;
}
+	/* If is_app_apu is false and apu_prefer_gtt is true, this is an APU whose
+	 * carve-out is smaller than GTT. In that case VRAM allocations go to the
+	 * GTT domain, so skip the VRAM check; the ttm_mem_limit check above already
+	 * covers this allocation.
+	 */
+
+ if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
+ uint64_t vram_available =
+ vram_size - reserved_for_pt - reserved_for_ras -
+ atomic64_read(&adev->vram_pin_size);
+ if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
@@ -234,7 +250,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
- (adev->flags & AMD_IS_APU) ?
+ adev->apu_prefer_gtt ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -262,7 +278,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -370,40 +386,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO to remove the eviction fences from.
+ *
+ * This function should only be used on release, when all references to the BO
+ * have already been dropped. We remove the eviction fences from the private
+ * copy of the dma_resv object here, since that is what is used during release
+ * to determine whether the BO is idle.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *root = bo;
- struct amdgpu_vm_bo_base *vm_bo;
- struct amdgpu_vm *vm;
- struct amdkfd_process_info *info;
- struct amdgpu_amdkfd_fence *ef;
- int ret;
-
- /* we can always get vm_bo from root PD bo.*/
- while (root->parent)
- root = root->parent;
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
- vm_bo = root->vm_bo;
- if (!vm_bo)
- return 0;
+ dma_resv_assert_held(resv);
- vm = vm_bo->vm;
- if (!vm)
- return 0;
-
- info = vm->process_info;
- if (!info || !info->eviction_fence)
- return 0;
-
- ef = container_of(dma_fence_get(&info->eviction_fence->base),
- struct amdgpu_amdkfd_fence, base);
-
- BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
- dma_resv_unlock(bo->tbo.base.resv);
+ stub = dma_fence_get_stub();
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
- dma_fence_put(&ef->base);
- return ret;
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
@@ -499,10 +507,11 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return amdgpu_sync_fence(sync, vm->last_update);
+ return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -512,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -603,12 +612,6 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
- int ret;
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -890,7 +893,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
+ if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -975,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -1054,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1086,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
@@ -1100,6 +1110,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -1107,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
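The userptr paths above now pass an amdgpu_hmm_range object through the whole
fault/restore cycle instead of a bare struct hmm_range plus a separate pages
array. A minimal sketch of the lifecycle, pieced together from the call sites in
this hunk and the update_invalid_user_pages() / confirm_valid_user_pages_locked()
hunks below (helper signatures are inferred from these call sites, not quoted
from their headers):

    struct amdgpu_hmm_range *range;
    int r;

    range = amdgpu_hmm_range_alloc(NULL);              /* tracking object for this cycle */
    if (!range)
            return -ENOMEM;

    r = amdgpu_ttm_tt_get_user_pages(bo, range);       /* fault in the user pages via HMM */
    if (r) {
            amdgpu_hmm_range_free(range);               /* caller owns the range on error */
            return r;
    }

    amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);  /* hand the pages to the ttm_tt */
    /* ... validate and map the BO ... */

    if (!amdgpu_hmm_range_valid(range))                /* did an invalidation race with us? */
            ;                                          /* mark invalid, retry on next restore */
    amdgpu_hmm_range_free(range);                       /* always freed by the owner */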
@@ -1261,9 +1274,13 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ /* The VM entity is stopped if the process was killed; don't clear freed PT BOs */
+ if (!amdgpu_vm_ready(vm))
+ return 0;
+
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
return 0;
}
@@ -1287,7 +1304,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
}
- return amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
@@ -1529,27 +1546,6 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid)
-
-{
- int ret;
-
- /* Free the original amdgpu allocated pasid,
- * will be replaced with kfd allocated pasid.
- */
- if (avm->pasid) {
- amdgpu_pasid_free(avm->pasid);
- amdgpu_vm_set_pasid(adev, avm, 0);
- }
-
- ret = amdgpu_vm_set_pasid(adev, avm, pasid);
- if (ret)
- return ret;
-
- return 0;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
@@ -1607,27 +1603,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv)
-{
- struct amdgpu_vm *avm;
-
- if (WARN_ON(!adev || !drm_priv))
- return;
-
- avm = drm_priv_to_vm(drm_priv);
-
- pr_debug("Releasing process vm %p\n", avm);
-
- /* The original pasid of amdgpu vm has already been
- * released during making a amdgpu vm to a compute vm
- * The current pasid is managed by kfd and will be
- * released on kfd process destroy. Set amdgpu pasid
- * to 0 to avoid duplicate release.
- */
- amdgpu_vm_release_compute(adev, avm);
-}
-
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1682,13 +1657,17 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
- vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
- - adev->kfd.vram_used_aligned[xcp_id]
- - atomic64_read(&adev->vram_pin_size)
- - reserved_for_pt
- - reserved_for_ras;
+ if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id];
+ else
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
+ - atomic64_read(&adev->vram_pin_size)
+ - reserved_for_pt
+ - reserved_for_ras;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1736,7 +1715,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1948,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
@@ -1986,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
if (size) {
if (!is_imported &&
- (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- ((adev->flags & AMD_IS_APU) &&
- mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
*size = bo_size;
else
*size = 0;
@@ -2361,10 +2338,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb(); /* make sure read happened */
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
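The acquire/release pair replaces the explicit mb(): the acquire load orders the
copy of *adev->gmc.vm_fault_info after the flag is observed, and the release
store of 0 orders that copy before the slot is handed back. A sketch of the
assumed pairing on the producer side (the GMC fault interrupt handler is not part
of this hunk, so the producer shape below is illustrative only):

    /* producer (GMC fault handler), assumed shape */
    adev->gmc.vm_fault_info->vmid = vmid;                      /* fill the record first */
    atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);   /* then publish it       */

    /* consumer (amdgpu_amdkfd_gpuvm_get_vm_fault_info, as above) */
    if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
            *mem = *adev->gmc.vm_fault_info;                   /* reads ordered after the flag */
            atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
    }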
@@ -2414,7 +2390,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
- !(adev->flags & AMD_IS_APU) ?
+ !adev->apu_prefer_gtt ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
@@ -2575,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
@@ -2599,10 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
}
+ mem->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ amdgpu_hmm_range_free(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
@@ -2615,9 +2595,28 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If an application unmaps memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ /* Send a GPU VM fault to user space */
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
+ }
+
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2756,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
@@ -2969,7 +2968,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
- ret = amdgpu_sync_fence(&sync_obj, fence);
+ ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
@@ -3013,9 +3012,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 093141ad6ed0..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -36,13 +36,6 @@
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"
-static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
- ATOM_GPIO_I2C_ASSIGMENT *gpio,
- u8 index)
-{
-
-}
-
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct amdgpu_i2c_bus_rec i2c;
@@ -108,9 +101,6 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
-
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
if (gpio->sucI2cId.ucAccess == id) {
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
break;
@@ -142,8 +132,6 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
if (i2c.valid) {
@@ -156,6 +144,38 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
}
}
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct amdgpu_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ gpio = &i2c_info->asGPIO_Info[0];
+ for (i = 0; i < num_indices; i++) {
+ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+
+ if (i2c.valid && i2c.i2c_id == i2c_id) {
+ sprintf(stmp, "OEM 0x%x", i2c.i2c_id);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
+ break;
+ }
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+ }
+ }
+}
+
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
u8 id)
@@ -686,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1796,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0e16432d9a72..867bc5c5ce67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -136,6 +136,7 @@ amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id);
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f873dd3cae16..636385c80f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
- if (frev == 2 && crev == 1) {
- fw_usage_v2_1 =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_1(adev,
- fw_usage_v2_1,
- &usage_bytes);
- } else if (frev >= 2 && crev >= 2) {
- fw_usage_v2_2 =
- (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_2(adev,
- fw_usage_v2_2,
- &usage_bytes);
+ /* Skip atomfirmware allocation for SR-IOV VFs when the dynamic critical region is enabled */
+ if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
}
@@ -281,6 +284,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
@@ -549,9 +555,10 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
u16 data_offset, size;
union umc_info *umc_info;
u8 frev, crev;
- bool ecc_default_enabled = false;
+ bool mem_ecc_enabled = false;
u8 umc_config;
u32 umc_config1;
+ adev->ras_default_ecc_enabled = false;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
@@ -563,20 +570,22 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 2:
umc_config = le32_to_cpu(umc_info->v32.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 3:
umc_config = le32_to_cpu(umc_info->v33.umc_config);
umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -585,9 +594,12 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
} else if (frev == 4) {
switch (crev) {
case 0:
+ umc_config = le32_to_cpu(umc_info->v40.umc_config);
umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -599,7 +611,7 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
}
}
- return ecc_default_enabled;
+ return mem_ecc_enabled;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 423fd2eebe1e..35d04e69aec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -84,18 +84,26 @@ static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
return false;
}
+void amdgpu_bios_release(struct amdgpu_device *adev)
+{
+ kfree(adev->bios);
+ adev->bios = NULL;
+ adev->bios_size = 0;
+}
+
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if the dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
*/
static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios = NULL;
resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
+ u32 size = 256U * 1024U; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
@@ -107,21 +115,36 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap_wc(vram_base, size);
- if (!bios)
- return false;
adev->bios = kmalloc(size, GFP_KERNEL);
- if (!adev->bios) {
- iounmap(bios);
+ if (!adev->bios)
return false;
+
+ /* For SR-IOV, when the dynamic critical region is enabled,
+ * the vbios image is placed at a dynamic offset in VF VRAM.
+ * If the dynamic critical region is disabled, follow the existing logic as on bare metal.
+ */
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+ } else {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+
+ memcpy_fromio(adev->bios, bios, size);
+ iounmap(bios);
}
+
adev->bios_size = size;
- memcpy_fromio(adev->bios, bios, size);
- iounmap(bios);
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -149,7 +172,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
pci_unmap_rom(adev->pdev, bios);
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -189,7 +212,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
if (!check_atom_bios(adev, len)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -225,7 +248,8 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
return true;
free_bios:
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
+
return false;
}
@@ -327,7 +351,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
}
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = size;
@@ -392,7 +416,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
GFP_KERNEL);
if (!check_atom_bios(adev, vhdr->ImageLength)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = vhdr->ImageLength;
@@ -439,6 +463,13 @@ success:
return true;
}
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+ struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+ return (res->flags & IORESOURCE_ROM_SHADOW);
+}
+
static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
@@ -457,14 +488,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
+ if (amdgpu_prefer_rom_resource(adev)) {
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
- if (amdgpu_read_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
- goto success;
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ } else {
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
}
if (amdgpu_read_bios_from_rom(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
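In the fast path where userspace's bo_info_size matches the kernel's entry size,
the open-coded kvmalloc_array() + copy_from_user() pair is folded into
vmemdup_array_user(), which performs the overflow-checked allocation and the copy
in one call and returns an ERR_PTR on failure. A minimal usage sketch
(illustrative only, not part of the patch):

    struct drm_amdgpu_bo_list_entry *info;

    info = vmemdup_array_user(uptr, bo_number, sizeof(*info));
    if (IS_ERR(info))
            return PTR_ERR(info);   /* -EFAULT or -ENOMEM, nothing left to free */

    /* ... use info[0 .. bo_number - 1] ... */

    kvfree(info);                   /* vmemdup_array_user() allocates with kvmalloc */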
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..2b5e7c46a39d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,8 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 344e0a9ee08a..9f96d568acf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
@@ -674,7 +671,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -737,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -839,7 +834,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -922,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1149,10 +1142,8 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1195,29 +1186,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
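Expressed in kHz, the limits used above work out to 165000 for single-link DVI,
2 * 165000 = 330000 for dual-link DVI and HDMI type B, and 297000, 300000 or
600000 for HDMI type A depending on what amdgpu_max_hdmi_pixel_clock() reports
for the ASIC. For example, a mode with a 300000 kHz pixel clock is rejected on
single-link DVI, accepted on dual-link DVI or HDMI type B, rejected on pre-Tonga
HDMI (297000 kHz cap) and accepted from Tonga onwards.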
@@ -1449,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1464,7 +1493,7 @@ out:
}
static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
new file mode 100644
index 000000000000..425a3e564360
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/list.h>
+#include "amdgpu.h"
+
+static const guid_t MCE = CPER_NOTIFY_MCE;
+static const guid_t CMC = CPER_NOTIFY_CMC;
+static const guid_t BOOT = BOOT_TYPE;
+
+static const guid_t CRASHDUMP = AMD_CRASHDUMP;
+static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
+
+static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
+{
+ hdr->record_length += size;
+}
+
+static void amdgpu_cper_get_timestamp(struct cper_timestamp *timestamp)
+{
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
+
+ time64_to_tm(now, 0, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = 1 + tm.tm_mon;
+ timestamp->year = (1900 + tm.tm_year) % 100;
+ timestamp->century = (1900 + tm.tm_year) / 100;
+}
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev)
+{
+ char record_id[16];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR_REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = sev;
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ amdgpu_cper_get_timestamp(&hdr->timestamp);
+
+ snprintf(record_id, 9, "%d:%X",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0,
+ atomic_inc_return(&adev->cper.unique_id));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ adev->pdev->vendor, adev->pdev->device);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ hdr->notify_type = MCE;
+ break;
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ if (sev == CPER_SEV_NON_FATAL_CORRECTED)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type\n");
+ break;
+ }
+
+ __inc_entry_length(hdr, HDR_LEN);
+}
+
+static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
+ struct cper_sec_desc *section_desc,
+ bool bp_threshold,
+ bool poison,
+ enum cper_error_severity sev,
+ guid_t sec_type,
+ uint32_t section_length,
+ uint32_t section_offset)
+{
+ section_desc->revision_minor = CPER_SEC_MINOR_REV_1;
+ section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
+ section_desc->sec_offset = section_offset;
+ section_desc->sec_length = section_length;
+ section_desc->valid_bits.fru_text = 1;
+ section_desc->flag_bits.primary = 1;
+ section_desc->severity = sev;
+ section_desc->sec_type = sec_type;
+
+ snprintf(section_desc->fru_text, 20, "OAM%d",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0);
+
+ if (bp_threshold)
+ section_desc->flag_bits.exceed_err_threshold = 1;
+ if (poison)
+ section_desc->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_crashdump_fatal *section;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, false,
+ CPER_SEV_FATAL, CRASHDUMP, FATAL_SEC_LEN,
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->body.reg_arr_size = sizeof(reg_data);
+ section->body.data = reg_data;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + FATAL_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ bool poison;
+
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, poison,
+ sev, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ reg_count = umin(reg_count, CPER_ACA_REG_COUNT);
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ /* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count)
+{
+ struct cper_hdr *hdr;
+ uint32_t size = 0;
+
+ size += HDR_LEN;
+ size += (SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ size += (NONSTD_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ size += (FATAL_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_BOOT:
+ size += (BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type!\n");
+ return NULL;
+ }
+
+ hdr = kzalloc(size, GFP_KERNEL);
+ if (!hdr)
+ return NULL;
+
+ /* Save this early */
+ hdr->sec_cnt = section_count;
+
+ return hdr;
+}
+
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank)
+{
+ struct cper_hdr *fatal = NULL;
+ struct cper_sec_crashdump_reg_data reg_data = { 0 };
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ fatal = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
+ if (!fatal) {
+ dev_err(adev->dev, "fail to alloc cper entry for ue record\n");
+ return -ENOMEM;
+ }
+
+ reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.addr_lo = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.addr_hi = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.ipid_lo = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.ipid_hi = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.synd_lo = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data.synd_hi = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ amdgpu_cper_entry_fill_hdr(adev, fatal, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_fatal_section(adev, fatal, 0, reg_data);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
+ kfree(fatal);
+
+ return 0;
+}
+
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
+{
+ struct cper_hdr *bp_threshold = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
+ if (!bp_threshold) {
+ dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
+ return -ENOMEM;
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+ kfree(bp_threshold);
+
+ return 0;
+}
+
+static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
+ enum aca_error_type aca_err_type)
+{
+ switch (aca_err_type) {
+ case ACA_ERROR_TYPE_UE:
+ return CPER_SEV_FATAL;
+ case ACA_ERROR_TYPE_CE:
+ return CPER_SEV_NON_FATAL_CORRECTED;
+ case ACA_ERROR_TYPE_DEFERRED:
+ return CPER_SEV_NON_FATAL_UNCORRECTED;
+ default:
+ dev_err(adev->dev, "Unknown ACA error type!\n");
+ return CPER_SEV_FATAL;
+ }
+}
+
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count)
+{
+ struct cper_hdr *corrected = NULL;
+ enum cper_error_severity sev = CPER_SEV_NON_FATAL_CORRECTED;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t reg_data[CPER_ACA_REG_COUNT] = { 0 };
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ uint32_t i = 0;
+ int ret;
+
+ corrected = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_RUNTIME, bank_count);
+ if (!corrected) {
+ dev_err(adev->dev, "fail to allocate cper entry for ce records\n");
+ return -ENOMEM;
+ }
+
+ /* Raise severity if any DE is detected in the ACA bank list */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ sev = CPER_SEV_NON_FATAL_UNCORRECTED;
+ break;
+ }
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);
+
+ /* Combine CE and DE in cper record */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_CTL_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_ADDR_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_ADDR_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_MISC0_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_MISC0_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_IPID_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_IPID_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_SYND_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data[CPER_ACA_REG_SYND_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ ret = amdgpu_cper_entry_fill_runtime_section(adev, corrected, i++,
+ amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
+ reg_data, CPER_ACA_REG_COUNT);
+ if (ret)
+ return ret;
+ }
+
+ amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
+ kfree(corrected);
+
+ return 0;
+}
+
+static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ return strcmp(chdr->signature, "CPER") ? false : true;
+}
+
+static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+ u64 p;
+ u32 chunk, rec_len = 0;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ chunk = ring->ring_size - (pos << 2);
+
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = chdr->record_length;
+ goto calc;
+ }
+
+ /* ring buffer is not full, no cper data after ring->wptr */
+ if (ring->count_dw)
+ goto calc;
+
+ for (p = pos + 1; p <= ring->buf_mask; p++) {
+ chdr = (struct cper_hdr *)&(ring->ring[p]);
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = (p - pos) << 2;
+ goto calc;
+ }
+ }
+
+calc:
+ if (!rec_len)
+ return chunk;
+ else
+ return umin(rec_len, chunk);
+}
+
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
+{
+ u64 pos, wptr_old, rptr;
+ int rec_cnt_dw = count >> 2;
+ u32 chunk, ent_sz;
+ u8 *s = (u8 *)src;
+
+ if (count >= ring->ring_size - 4) {
+ dev_err(ring->adev->dev,
+ "CPER data size(%d) is larger than ring size(%d)\n",
+ count, ring->ring_size - 4);
+
+ return;
+ }
+
+ mutex_lock(&ring->adev->cper.ring_lock);
+
+ wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+
+ while (count) {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
+ chunk = umin(ent_sz, count);
+
+ memcpy(&ring->ring[ring->wptr], s, chunk);
+
+ ring->wptr += (chunk >> 2);
+ ring->wptr &= ring->ptr_mask;
+ count -= chunk;
+ s += chunk;
+ }
+
+ if (ring->count_dw < rec_cnt_dw)
+ ring->count_dw = 0;
+
+ /* the buffer has overflowed, adjust rptr */
+ if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
+ ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
+ ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
+ pos = (ring->wptr + 1) & ring->ptr_mask;
+
+ do {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);
+
+ rptr += (ent_sz >> 2);
+ rptr &= ring->ptr_mask;
+ *ring->rptr_cpu_addr = rptr;
+
+ pos = rptr;
+ } while (!amdgpu_cper_is_hdr(ring, rptr));
+ }
+
+ if (ring->count_dw >= rec_cnt_dw)
+ ring->count_dw -= rec_cnt_dw;
+ mutex_unlock(&ring->adev->cper.ring_lock);
+}
+
+static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return *(ring->rptr_cpu_addr);
+}
+
+static u64 amdgpu_cper_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ return ring->wptr;
+}
+
+static const struct amdgpu_ring_funcs cper_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_CPER,
+ .align_mask = 0xff,
+ .support_64bit_ptrs = false,
+ .get_rptr = amdgpu_cper_ring_get_rptr,
+ .get_wptr = amdgpu_cper_ring_get_wptr,
+};
+
+static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &(adev->cper.ring_buf);
+
+ mutex_init(&adev->cper.ring_lock);
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->no_scheduler = true;
+ ring->funcs = &cper_ring_funcs;
+
+ sprintf(ring->name, "cper");
+ return amdgpu_ring_init(adev, ring, CPER_MAX_RING_SIZE, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+}
+
+int amdgpu_cper_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ r = amdgpu_cper_ring_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
+ return r;
+ }
+
+ mutex_init(&adev->cper.cper_lock);
+
+ adev->cper.enabled = true;
+ adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
+
+ return 0;
+}
+
+int amdgpu_cper_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ adev->cper.enabled = false;
+
+ amdgpu_ring_fini(&(adev->cper.ring_buf));
+ adev->cper.count = 0;
+ adev->cper.wptr = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
new file mode 100644
index 000000000000..353421807387
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_CPER_H__
+#define __AMDGPU_CPER_H__
+
+#include "amd_cper.h"
+#include "amdgpu_aca.h"
+
+#define CPER_MAX_ALLOWED_COUNT 0x1000
+#define CPER_MAX_RING_SIZE		0x100000
+#define HDR_LEN (sizeof(struct cper_hdr))
+#define SEC_DESC_LEN (sizeof(struct cper_sec_desc))
+
+#define BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot))
+#define FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal))
+#define NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err))
+
+#define SEC_DESC_OFFSET(idx) (HDR_LEN + (SEC_DESC_LEN * idx))
+
+#define BOOT_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (BOOT_SEC_LEN * idx))
+#define FATAL_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (FATAL_SEC_LEN * idx))
+#define NONSTD_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (NONSTD_SEC_LEN * idx))
+
+enum amdgpu_cper_type {
+ AMDGPU_CPER_TYPE_RUNTIME,
+ AMDGPU_CPER_TYPE_FATAL,
+ AMDGPU_CPER_TYPE_BOOT,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+};
+
+struct amdgpu_cper {
+ bool enabled;
+
+ atomic_t unique_id;
+ struct mutex cper_lock;
+
+ /* Lifetime CPERs generated */
+ uint32_t count;
+ uint32_t max_count;
+
+ uint32_t wptr;
+
+ void *ring[CPER_MAX_ALLOWED_COUNT];
+ struct amdgpu_ring ring_buf;
+ struct mutex ring_lock;
+};
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev);
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data);
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count);
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t section_idx);
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count);
+/* UEs must be encoded into separate cper entries, one cper entry per UE */
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank);
+/* CEs and DEs are combined into 1 cper entry */
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count);
+/* Bad page threshold is encoded into a separate cper entry */
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev);
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
+ void *src, int count);
+int amdgpu_cper_init(struct amdgpu_device *adev);
+int amdgpu_cper_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5cc5f59e3018..ecdfe6cb36cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -40,6 +40,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,17 +274,36 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,10 +356,20 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
+ if (!p->adev->debug_enable_ce_cs &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
+ dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
+ return -EINVAL;
+ }
+
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
@@ -373,7 +390,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -428,7 +445,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -445,12 +462,12 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
return r;
}
@@ -691,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps) {
+ if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
*max_bytes = 0;
*max_vis_bytes = 0;
return;
@@ -873,26 +890,18 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- int i;
-
- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ e->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!e->range))
+ return -ENOMEM;
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -936,7 +945,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -945,11 +954,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -960,7 +966,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -989,13 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1038,13 +1038,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1111,11 +1111,14 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
return -EINVAL;
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -1124,7 +1127,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
@@ -1135,7 +1139,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1154,7 +1159,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1167,7 +1173,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
if (r)
return r;
@@ -1209,7 +1215,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1248,7 +1254,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
continue;
}
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
+ GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -1319,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
@@ -1421,7 +1428,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1436,9 +1443,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1737,30 +1744,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c43d1b6e5d66..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
@@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
return timeout;
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -944,24 +944,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
drm_sched_entity_fini(entity);
}
}
+ kref_put(&ctx->refcount, amdgpu_ctx_fini);
}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
- struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id;
-
amdgpu_ctx_mgr_entity_fini(mgr);
-
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
- DRM_ERROR("ctx %p is still alive\n", ctx);
- }
-
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 85376baaa92f..090dfe86f75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 49ca8c814455..62d43b8cbe58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
@@ -179,7 +178,6 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
if (rd->id.use_grbm) {
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
mutex_unlock(&rd->lock);
@@ -310,7 +307,6 @@ end:
mutex_unlock(&rd->lock);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
@@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
@@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
up_write(&adev->reset_domain->sem);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
r = amdgpu_benchmark(adev, val);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -1786,7 +1762,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -1902,7 +1878,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence->base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -1990,7 +1966,7 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
uint32_t max_freq, min_freq;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return -EINVAL;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
ret = -EINVAL;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
@@ -2105,6 +2080,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
@@ -2122,14 +2098,68 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
- adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
- adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
- debugfs_create_blob("amdgpu_discovery", 0444, root,
- &adev->debugfs_discovery_blob);
+ if (adev->discovery.debugfs_blob.size)
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->discovery.debugfs_blob);
return 0;
}
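+/* Show the root PD address and VM manager page-table parameters for this client's VM */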
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ adev = drm_to_adev(file->minor->dev);
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2139,4 +2169,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 824f9da5b6ce..4e2fe6674db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -217,13 +217,12 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
@@ -364,5 +363,9 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+ drm_info(dev, "AMDGPU device coredump file has been created\n");
+ drm_info(dev, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ dev->primary->index);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 36053b3d48b3..58c3ffe707d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,7 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
@@ -85,6 +86,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -94,6 +96,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -102,6 +105,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
+#define AMDGPU_VBIOS_SKIP (1U << 0)
+#define AMDGPU_VBIOS_OPTIONAL (1U << 1)
+
static const struct drm_driver amdgpu_kms_driver;
const char *amdgpu_asic_name[] = {
@@ -174,6 +180,12 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -224,8 +236,26 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ ret = sysfs_create_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ sysfs_remove_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+}
+
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -261,8 +291,8 @@ static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
return bytes_read;
}
-BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
- AMDGPU_SYS_REG_STATE_END);
+static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
@@ -389,19 +419,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -410,15 +437,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
@@ -431,29 +456,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
* 1 if the device supports BACO;
* 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -473,10 +493,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -490,12 +512,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
+ if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ if (!amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
@@ -524,14 +547,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -1265,14 +1288,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1289,15 +1312,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1313,14 +1338,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1337,15 +1363,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1363,8 +1391,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1384,11 +1413,23 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
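+/* Report whether the VBIOS is skipped entirely or merely optional for this configuration */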
+static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
+{
+ if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
+ return AMDGPU_VBIOS_SKIP;
+
+ if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
+ return AMDGPU_VBIOS_OPTIONAL;
+
+ return 0;
+}
+
/**
* amdgpu_device_asic_init - Wrapper for atom asic_init
*
@@ -1398,18 +1439,28 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ uint32_t flags;
+ bool optional;
int ret;
amdgpu_asic_pre_asic_init(adev);
+ flags = amdgpu_device_get_vbios_flags(adev);
+ optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
+ if (optional && !adev->bios)
+ return 0;
+
ret = amdgpu_atomfirmware_asic_init(adev, true);
return ret;
} else {
+ if (optional && !adev->bios)
+ return 0;
+
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
@@ -1627,9 +1678,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
+ int max_size, r;
unsigned int i;
u16 cmd;
- int r;
if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
return 0;
@@ -1638,9 +1689,21 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1663,28 +1726,27 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
/* Limit the BAR size to what is available */
- rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
- rbar_size);
+ max_size = pci_rebar_get_max_size(adev->pdev, 0);
+ if (max_size < 0)
+ return 0;
+ rbar_size = min(max_size, rbar_size);
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ /* Tear down doorbell as resizing will release BARs */
amdgpu_doorbell_fini(adev);
- if (adev->asic_type >= CHIP_BONAIRE)
- pci_release_resource(adev->pdev, 2);
-
- pci_release_resource(adev->pdev, 0);
- r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ r = pci_resize_resource(adev->pdev, 0, rbar_size,
+ (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
+ : 1 << 2);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
-
- pci_assign_unassigned_bus_resources(adev->pdev->bus);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
@@ -1698,14 +1760,6 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
-{
- if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
- return false;
-
- return true;
-}
-
/*
* GPU helpers function.
*/
@@ -1720,12 +1774,15 @@ static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
*/
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
- uint32_t reg;
+ uint32_t reg, flags;
if (amdgpu_sriov_vf(adev))
return false;
- if (!amdgpu_device_read_bios(adev))
+ flags = amdgpu_device_get_vbios_flags(adev);
+ if (flags & AMDGPU_VBIOS_SKIP)
+ return false;
+ if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
return false;
if (amdgpu_passthrough(adev)) {
@@ -1789,8 +1846,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -1826,6 +1883,42 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
+ * It's unclear if this is a platform-specific or GPU-specific issue.
+ * Disable ASPM on SI for the time being.
+ */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return true;
+
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1850,7 +1943,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -1937,7 +2030,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -1952,7 +2045,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -1960,7 +2053,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -2068,8 +2161,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -2089,7 +2205,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2101,12 +2218,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2173,8 +2291,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2207,8 +2326,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2233,7 +2353,8 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
- adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+ adev->ip_blocks[i].version->funcs->get_clockgating_state(
+ &adev->ip_blocks[i], flags);
}
}
@@ -2269,7 +2390,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ * amdgpu_device_ip_is_hw - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
@@ -2277,6 +2398,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
* Check if the hardware IP is enabled or not.
* Returns true if the IP is enabled, false if not.
*/
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].status.hw;
+ }
+ return false;
+}
+
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP valid
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is valid or not.
+ * Returns true if the IP is valid, false if not.
+ */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -2337,6 +2479,34 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
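+/* Short, human-readable names for IP block types, used in the IP detection log messages */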
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+ [AMD_IP_BLOCK_TYPE_RAS] = "ras",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2365,8 +2535,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
- ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2423,9 +2598,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2436,8 +2613,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2459,9 +2637,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2483,8 +2658,15 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->discovery.bin)
+ return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->discovery.bin)
+ return 0;
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
@@ -2564,6 +2746,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2578,8 +2778,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
+ bool total, skip_bios;
+ uint32_t bios_flags;
int i, r;
- bool total;
amdgpu_device_enable_virtual_display(adev);
@@ -2587,6 +2788,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
+
+ r = amdgpu_virt_init_critical_region(adev);
+ if (r)
+ return r;
}
switch (adev->asic_type) {
@@ -2643,6 +2848,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2655,7 +2867,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -2664,21 +2875,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
ip_block = &adev->ip_blocks[i];
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else if (ip_block->version->funcs->early_init) {
r = ip_block->version->funcs->early_init(ip_block);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
@@ -2692,16 +2911,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ bios_flags = amdgpu_device_get_vbios_flags(adev);
+ skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
/* Read BIOS */
- if (amdgpu_device_read_bios(adev)) {
- if (!amdgpu_get_bios(adev))
+ if (!skip_bios) {
+ bool optional =
+ !!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
+ if (!amdgpu_get_bios(adev) && !optional)
return -EINVAL;
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
- return r;
+ if (optional && !adev->bios)
+ dev_info(
+ adev->dev,
+ "VBIOS image optional, proceeding without VBIOS image");
+
+ if (adev->bios) {
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(
+ adev,
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
+ 0, 0);
+ return r;
+ }
}
}
@@ -2714,6 +2948,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
+ if (adev->gmc.xgmi.supported)
+ amdgpu_xgmi_early_init(adev);
+
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -2741,8 +2980,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2766,8 +3007,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2805,8 +3047,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2823,6 +3068,12 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
{
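+ /* Common scheduler arguments; the per-ring timeout, credit limit, score and name are filled in below. */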
+ struct drm_sched_init_args args = {
+ .ops = &amdgpu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .timeout_wq = adev->reset_domain->wq,
+ .dev = adev->dev,
+ };
long timeout;
int r, i;
@@ -2848,32 +3099,36 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- ring->num_hw_submission, 0,
- timeout, adev->reset_domain->wq,
- ring->sched_score, ring->name,
- adev->dev);
+ args.timeout = timeout;
+ args.credit_limit = ring->num_hw_submission;
+ args.score = ring->sched_score;
+ args.name = ring->name;
+
+ r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
@@ -2905,8 +3160,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->sw_init) {
r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
goto init_failed;
}
}
@@ -2920,7 +3177,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -2932,17 +3190,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -2954,14 +3216,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -3056,6 +3320,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_fru_get_product_info(adev);
+ if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
+ r = amdgpu_cper_init(adev);
+
init_failed:
return r;
@@ -3099,6 +3366,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3147,8 +3416,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3174,18 +3445,21 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD, it's handled specially */
+ /* skip CG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3212,7 +3486,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3251,8 +3525,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3261,7 +3537,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
@@ -3275,7 +3551,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
if (amdgpu_passthrough(adev) &&
@@ -3308,7 +3584,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3322,17 +3600,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
if (!ip_block->version->funcs->hw_fini) {
- DRM_ERROR("hw_fini of IP block <%s> not defined\n",
- ip_block->version->funcs->name);
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
} else {
r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- ip_block->version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
}
@@ -3373,15 +3653,17 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
+ amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -3395,7 +3677,22 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
+ }
+
+ /*
+ * Driver reload on the APU can fail due to firmware validation because
+ * the PSP is always running, as it is shared across the whole SoC.
+ * This same issue does not occur on dGPU because it has a mechanism
+ * that checks whether the PSP is running. A solution for those issues
+ * in the APU is to trigger a GPU reset, but this should be done during
+ * the unload phase to avoid adding boot latency and screen flicker.
+ */
+ if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ dev_err(adev->dev, "asic reset on %s failed\n", __func__);
}
return 0;
@@ -3416,6 +3713,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_cper_fini(adev);
+
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
@@ -3435,13 +3734,16 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
+ amdgpu_doorbell_fini(adev);
}
if (adev->ip_blocks[i].version->funcs->sw_fini) {
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
}
}
adev->ip_blocks[i].status.sw = false;
@@ -3457,6 +3759,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -3474,7 +3777,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3502,7 +3805,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
*/
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
@@ -3523,13 +3826,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
if (r)
- return r;
+ goto unwind;
}
return 0;
+unwind:
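+ /* Suspend phase 1 maps to resume phase 3: bring the DCE blocks back up, re-allow DF C-states and re-gate CG/PG. */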
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec)
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
+ rec);
+
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+
+ return r;
}
/**
@@ -3545,7 +3860,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
if (adev->in_s0ix)
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
@@ -3568,6 +3883,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev, adev->ip_blocks[i].version->type))
continue;
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
@@ -3599,24 +3921,52 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
- adev->ip_blocks[i].status.hw = false;
+ if (r)
+ goto unwind;
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ goto unwind;
}
}
}
}
return 0;
+unwind:
+ /* suspend phase 2 = resume phase 1 + resume phase 2 */
+ rec = amdgpu_device_ip_resume_phase1(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_fw_loading(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_fw_loading failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_ip_resume_phase2(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ return r;
}
/**
@@ -3630,7 +3980,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
@@ -3893,12 +4243,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev: pci device pointer
* @asic_type: AMD asic type
*
* Check if there is DC (new modesetting infrastructure) support for an asic.
* returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -3912,25 +4264,13 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
- /*
- * We have systems in the wild with these ASICs that require
- * LVDS and VGA support which is not supported with DC.
- *
- * Fallback to the non-DC driver here by default so as not to
- * cause regressions.
- */
-#if defined(CONFIG_DRM_AMD_DC_SI)
- return amdgpu_dc > 0;
-#else
- return false;
-#endif
- case CHIP_BONAIRE:
+ return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
/*
* We have systems in the wild with these ASICs that require
- * VGA support which is not supported with DC.
+ * TRAVIS and NUTMEG support which is not supported with DC.
*
* Fallback to the non-DC driver here by default so as not to
* cause regressions.
@@ -3941,7 +4281,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -3960,7 +4302,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -3982,13 +4324,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -4002,7 +4344,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -4015,66 +4358,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
long timeout;
int ret = 0;
- /*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
+ /* By default, the timeout for all queues is 2 seconds */
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = msecs_to_jiffies(2000);
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
+ if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ return 0;
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- dev_warn(adev->dev, "lockup timeout disabled");
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
}
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1) {
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
}
}
+ /* When only one value is specified, apply it to all queues. */
+ if (index == 1)
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = timeout;
+
return ret;
}
@@ -4115,11 +4445,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
}
#endif
-static const struct attribute *amdgpu_dev_attributes[] = {
- &dev_attr_pcie_replay_count.attr,
- NULL
-};
-
static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
@@ -4131,7 +4456,56 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
+}
+
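+/* Register the atombios, pm, ucode, device/board attribute, FRU, register-state and XCP sysfs interfaces. */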
+static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
+
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
+
+ r = amdgpu_device_attr_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
+ if (r)
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
+
+ return r;
+}
+
+static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
}
/**
@@ -4147,7 +4521,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4199,9 +4572,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4216,7 +4591,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
- mutex_init(&adev->virt.rlcg_reg_lock);
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
@@ -4225,7 +4599,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.reset_sem_mutex);
/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
mutex_init(&adev->enforce_isolation_mutex);
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ for (i = 0; i < MAX_XCP; ++i) {
+ adev->isolation[i].spearhead = dma_fence_get_stub();
+ amdgpu_sync_create(&adev->isolation[i].active);
+ amdgpu_sync_create(&adev->isolation[i].prev);
+ }
+ mutex_init(&adev->gfx.userq_sch_mutex);
+ mutex_init(&adev->gfx.workload_profile_mutex);
+ mutex_init(&adev->vcn.workload_profile_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4242,14 +4623,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ xa_init(&adev->userq_doorbell_xa);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4271,6 +4657,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+ INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
adev->gfx.gfx_off_req_count = 1;
adev->gfx.gfx_off_residency = 0;
@@ -4286,10 +4673,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* for throttling interrupt) = 60 seconds.
*/
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
- ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
- ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -4308,8 +4693,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -4321,7 +4708,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return -ENOMEM;
/* detect hw virtualization here */
- amdgpu_detect_virtualization(adev);
+ amdgpu_virt_init(adev);
amdgpu_device_get_pcie_info(adev);
@@ -4344,10 +4731,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
- /* Get rid of things like offb */
- r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
- if (r)
- return r;
+ /*
+ * No need to remove conflicting FBs for non-display class devices.
+ * This prevents the sysfb from being freed accidentally.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ /* Get rid of things like offb */
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+ if (r)
+ return r;
+ }
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4439,7 +4833,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4465,8 +4859,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_atombios_i2c_init(adev);
+ amdgpu_i2c_init(adev);
}
}
@@ -4538,39 +4931,14 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
}
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
/*
* Place those sysfs registering after `late_init`. As some of those
* operations performed in `late_init` might affect the sysfs
* interfaces creating.
*/
- r = amdgpu_atombios_sysfs_init(adev);
- if (r)
- drm_err(&adev->ddev,
- "registering atombios sysfs failed (%d).\n", r);
-
- r = amdgpu_pm_sysfs_init(adev);
- if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
-
- r = amdgpu_ucode_sysfs_init(adev);
- if (r) {
- adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
- } else
- adev->ucode_sysfs_en = true;
-
- r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
- if (r)
- dev_err(adev->dev, "Could not create amdgpu device attr\n");
-
- r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
- if (r)
- dev_err(adev->dev,
- "Could not create amdgpu board attributes\n");
-
- amdgpu_fru_sysfs_init(adev);
- amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_cfg_sysfs_init(adev);
+ r = amdgpu_device_sys_interface_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4588,7 +4956,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4598,9 +4966,6 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
- amdgpu_xgmi_reset_on_init(adev);
-
amdgpu_device_check_iommu_direct_map(adev);
adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
@@ -4692,15 +5057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->pm.sysfs_initialized)
- amdgpu_pm_sysfs_fini(adev);
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
- amdgpu_fru_sysfs_fini(adev);
-
- amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_cfg_sysfs_fini(adev);
+ amdgpu_device_sys_interface_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4723,7 +5080,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- int idx;
+ int i, idx;
bool px;
amdgpu_device_ip_fini(adev);
@@ -4731,23 +5088,30 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+ for (i = 0; i < MAX_XCP; ++i) {
+ dma_fence_put(adev->isolation[i].spearhead);
+ amdgpu_sync_free(&adev->isolation[i].active);
+ amdgpu_sync_free(&adev->isolation[i].prev);
+ }
amdgpu_reset_fini(adev);
/* free i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_i2c_fini(adev);
+ amdgpu_i2c_fini(adev);
- if (amdgpu_emu_mode != 1)
- amdgpu_atombios_fini(adev);
-
- kfree(adev->bios);
- adev->bios = NULL;
+ if (adev->bios) {
+ if (amdgpu_emu_mode != 1)
+ amdgpu_atombios_fini(adev);
+ amdgpu_bios_release(adev);
+ }
kfree(adev->fru_info);
adev->fru_info = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ kfree(adev->xcp_mgr);
+ adev->xcp_mgr = NULL;
+
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4763,20 +5127,20 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
drm_dev_exit(idx);
}
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
amdgpu_discovery_fini(adev);
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -4796,9 +5160,21 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -4812,28 +5188,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
- * It is used to evict resources from the device before the system goes to
- * sleep while there is still access to swap.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
- fallthrough;
- case PM_SUSPEND_PREPARE:
- r = amdgpu_device_evict_resources(adev);
- /*
- * This is considered non-fatal at this time because
- * amdgpu_device_prepare() will also fatally evict resources.
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
- */
- if (r)
- drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
break;
}
@@ -4854,15 +5222,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4873,15 +5239,32 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
+}
-unprepare:
- adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
+/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- return r;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
}
/**
@@ -4897,7 +5280,7 @@ unprepare:
int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- int r = 0;
+ int r, rec;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4905,44 +5288,125 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
+ if (r)
+ goto unwind_sriov;
if (notify_clients)
- drm_client_dev_suspend(adev_to_drm(adev), false);
+ drm_client_dev_suspend(adev_to_drm(adev));
cancel_delayed_work_sync(&adev->delayed_init_work);
amdgpu_ras_suspend(adev);
- amdgpu_device_ip_suspend_phase1(adev);
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ goto unwind_smartshift;
- if (!adev->in_s0ix)
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ r = amdgpu_userq_suspend(adev);
+ if (r)
+ goto unwind_ip_phase1;
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unwind_userq;
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_fence_driver_hw_fini(adev);
- amdgpu_device_ip_suspend_phase2(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ if (r)
+ goto unwind_evict;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- r = amdgpu_dpm_notify_rlc_state(adev, false);
+ return 0;
+
+unwind_evict:
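+ /* Phase 2 suspend failed: re-enable the TTM buffer functions (if the SDMA ring is ready) and restart the fence driver hardware. */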
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ amdgpu_fence_driver_hw_init(adev);
+
+unwind_userq:
+ rec = amdgpu_userq_resume(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
+ return r;
+ }
+ rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
+ return r;
+ }
+
+unwind_ip_phase1:
+ /* suspend phase 1 = resume phase 3 */
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
+ return r;
+ }
+
+unwind_smartshift:
+ rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
+ return r;
+ }
+
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev));
+
+ amdgpu_ras_resume(adev);
+
+unwind_sriov:
+ if (amdgpu_sriov_vf(adev)) {
+ rec = amdgpu_virt_request_full_gpu(adev, true);
+ if (rec) {
+ dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
+ return r;
+ }
+ }
+
+ adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
+}
+
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as VF isn't in
+ * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
+ * so that QEMU reprograms MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
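+ /* Recompute the VRAM base offset for the new physical node after migration. */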
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
return 0;
}
@@ -4967,6 +5431,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4987,11 +5457,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
- if (r)
- goto exit;
- }
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
+
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5003,6 +5475,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -5012,7 +5487,7 @@ exit:
flush_delayed_work(&adev->delayed_init_work);
if (notify_clients)
- drm_client_dev_resume(adev_to_drm(adev), false);
+ drm_client_dev_resume(adev_to_drm(adev));
amdgpu_ras_resume(adev);
@@ -5037,13 +5512,12 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
- adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
+ adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -5278,6 +5752,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
@@ -5368,7 +5843,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
u32 i;
int ret = 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
dev_info(adev->dev, "GPU mode1 reset\n");
@@ -5410,7 +5886,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
goto mode1_reset_failed;
}
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
@@ -5419,6 +5896,29 @@ mode1_reset_failed:
return ret;
}
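+/* Reset the PCIe link via the SMU (skipped when recovering from a DPC event) and wait for the PSP bootloader. */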
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!amdgpu_reset_in_dpc(adev))
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5443,11 +5943,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!amdgpu_ring_sched_ready(ring))
continue;
- /* Clear job fence from fence drv to avoid force_completion
- * leave NULL and vm flush fence in fence drv
- */
- amdgpu_fence_driver_clear_job_fences(ring);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -5531,6 +6026,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -5548,7 +6044,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -5589,7 +6087,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+ r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
+ if (r)
+ goto out;
+
+ drm_client_dev_resume(adev_to_drm(tmp_adev));
/*
* The GPU enters bad state once faulty pages
@@ -5723,6 +6225,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5810,6 +6313,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
cancel_work(&adev->reset_work);
#endif
+ cancel_work(&adev->userq_reset_work);
if (adev->kfd.dev)
cancel_work(&adev->kfd.reset_work);
@@ -5826,117 +6330,76 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
-
- /*
- * If it reaches here because of hang/timeout and a RAS error is
- * detected at the same time, let RAS recovery take care of it.
- */
- if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
- !amdgpu_sriov_vf(adev) &&
- reset_context->src != AMDGPU_RESET_SRC_RAS) {
- dev_dbg(adev->dev,
- "Gpu recovery from source: %d yielding to RAS error recovery handling",
- reset_context->src);
- return 0;
- }
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
-
- reset_context->job = job;
- reset_context->hive = hive;
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (amdgpu_reset_in_dpc(adev))
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
}
+}
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_device_health_check(device_list_handle);
- if (r)
- goto end_reset;
- }
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
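+/* Suspend audio, RAS and user queues and block all ring schedulers on every device in the reset list before the reset. */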
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i;
+
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -5950,7 +6413,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -5964,13 +6427,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev));
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
+ amdgpu_userq_pre_reset(tmp_adev);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -5984,24 +6449,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
+}
- if (need_emergency_restart)
- goto skip_sched_resume;
-
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -6014,6 +6473,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -6028,12 +6492,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -6043,10 +6507,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
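
The retry label above re-runs the whole pre-reset/reset sequence whenever the backend asks for another attempt with -EAGAIN. A compressed, standalone sketch of that control flow follows; the names are hypothetical and the fake backend simply succeeds on the third try.

#include <errno.h>
#include <stdio.h>

static int attempts;

static int do_asic_reset(void)
{
	/* pretend the first two attempts ask for a retry */
	return (++attempts < 3) ? -EAGAIN : 0;
}

static int asic_reset_with_retry(void)
{
	int retry_limit = 3;	/* stand-in for AMDGPU_MAX_RETRY_LIMIT */
	int r;

retry:
	r = do_asic_reset();
	if (r == -EAGAIN && --retry_limit > 0)
		goto retry;
	return r;
}

int main(void)
{
	printf("reset result: %d after %d attempts\n",
	       asic_reset_with_retry(), attempts);
	return 0;
}
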
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6060,30 +6532,45 @@ skip_hw_reset:
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* bad news, how to tell it to userspace ?
* for ras error, we should report GPU bad status instead of
* reset failure
*/
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(
+ tmp_adev->dev,
+					"GPU reset(%d) failed with error %d\n",
+ atomic_read(
+ &tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -6094,18 +6581,124 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
+
}
+}
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: the job that triggered the hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all ASICs).
+ * Attempt a soft reset or a full reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+	 * the user can read the log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ dev_warn(adev->dev, "Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
+
+ /* Cannot be called after locking reset domain */
+ amdgpu_ras_pre_reset(adev, &device_list);
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence->base)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto reset_unlock;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto reset_unlock;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ amdgpu_ras_post_reset(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6116,6 +6709,19 @@ end_reset:
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
atomic_set(&adev->reset_domain->reset_res, r);
+
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
+
return r;
}
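
The rewritten recover path keeps the original goto-based unwinding: an emergency restart skips straight to the GPU-resume/unlock stage, an already-signaled guilty job skips only the HW reset, and every exit still releases the reset lock. A self-contained sketch of that shape, with stubs and hypothetical names only:

#include <stdio.h>
#include <stdbool.h>

static int asic_reset(void)   { return 0; }
static int sched_resume(void) { return 0; }

static int recover(bool emergency, bool job_signaled)
{
	int r = 0;

	printf("halt activities\n");
	if (emergency)
		goto skip_sched_resume;
	if (job_signaled)
		goto skip_hw_reset;

	r = asic_reset();
	if (r)
		goto reset_unlock;
skip_hw_reset:
	r = sched_resume();
	if (r)
		goto reset_unlock;
skip_sched_resume:
	printf("gpu resume\n");
reset_unlock:
	printf("put reset lock\n");
	return r;
}

int main(void)
{
	return recover(false, false);
}
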
@@ -6158,6 +6764,44 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the PCIe hierarchy to find the speed and width capabilities of the
+ * AMD dGPU, which may sit behind its own internal (virtual) upstream bridge.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
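
The walk above prefers the capabilities of the bridges built into the dGPU package: as long as the upstream device is an ATI bridge it keeps overwriting the speed and width, so the topmost internal switch wins; otherwise the endpoint's own caps are used. A userspace sketch with a fake parent chain, where pci_upstream_bridge() and the cap helpers are mimicked by plain fields:

#include <stdio.h>

struct fake_pci_dev {
	unsigned short vendor;
	int speed_cap;			/* stand-in for pcie_get_speed_cap() */
	int width_cap;			/* stand-in for pcie_get_width_cap() */
	struct fake_pci_dev *parent;	/* stand-in for pci_upstream_bridge() */
};

#define VENDOR_ATI 0x1002

static void gpu_bandwidth(struct fake_pci_dev *gpu, int *speed, int *width)
{
	struct fake_pci_dev *p;

	*speed = gpu->speed_cap;
	*width = gpu->width_cap;
	for (p = gpu->parent; p && p->vendor == VENDOR_ATI; p = p->parent) {
		/* use the upstream/downstream switch internal to the dGPU */
		*speed = p->speed_cap;
		*width = p->width_cap;
	}
}

int main(void)
{
	struct fake_pci_dev swus = { VENDOR_ATI, 4, 16, NULL };	/* internal switch */
	struct fake_pci_dev swds = { VENDOR_ATI, 4, 16, &swus };
	struct fake_pci_dev gpu  = { VENDOR_ATI, 3,  8, &swds };
	int speed, width;

	gpu_bandwidth(&gpu, &speed, &width);
	printf("speed cap %d, width cap x%d\n", speed, width);
	return 0;
}
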
+
+/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
* @adev: amdgpu_device pointer
@@ -6168,9 +6812,8 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- struct pci_dev *pdev;
enum pci_bus_speed speed_cap, platform_speed_cap;
- enum pcie_link_width platform_link_width;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -6192,11 +6835,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
&platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
- pdev = adev->pdev;
- speed_cap = pcie_get_speed_cap(pdev);
if (speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
@@ -6252,51 +6894,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* platform caps */
if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
switch (platform_link_width) {
case PCIE_LNK_X32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
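
Each arm of the switches above simply ORs in the support flag of every width less than or equal to the detected cap; only the assignment-to-OR change is new, so the asic and platform masks can now be combined. A loop-based sketch of the same computation, where the bit values are illustrative rather than the real CAIL_* defines:

#include <stdio.h>

enum { W_X1, W_X2, W_X4, W_X8, W_X12, W_X16, W_X32, W_COUNT };

static const int lanes[W_COUNT] = { 1, 2, 4, 8, 12, 16, 32 };

static unsigned int width_support_mask(int max_lanes)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < W_COUNT; i++)
		if (lanes[i] <= max_lanes)
			mask |= 1u << i;	/* e.g. ..._LINK_WIDTH_SUPPORT_Xn */
	return mask;
}

int main(void)
{
	printf("x8  -> 0x%x\n", width_support_mask(8));	/* x1,x2,x4,x8 */
	printf("x16 -> 0x%x\n", width_support_mask(16));
	return 0;
}
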
@@ -6345,12 +7039,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6360,13 +7053,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6397,45 +7089,52 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
- return PCI_ERS_RESULT_DISCONNECT;
- }
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
adev->pci_channel_state = state;
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+			/* Devices in a hive are expected to support FW-based
+			 * link reset on the other devices; if not, bail out.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set dpc status only if device is part of hive
+ * Non-hive devices should be able to recover after
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- drm_sched_stop(&ring->sched, NULL);
+ mutex_lock(&hive->hive_lock);
}
- atomic_inc(&adev->gpu_reset_counter);
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive)
+ mutex_unlock(&hive->hive_lock);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6448,8 +7147,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6473,27 +7174,38 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
+ u32 memsize;
+ u16 status;
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
-
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
- /* wait for asic to come out of reset */
- msleep(500);
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6513,26 +7225,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ }
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -6549,26 +7275,95 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else
+ list_add_tail(&adev->reset_list, &device_list);
+
+ amdgpu_device_sched_resume(&device_list, NULL, NULL);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
+
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *swus, *swds;
+ int r;
+
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
- drm_sched_start(&ring->sched, 0);
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
+
+ adev->pcie_reset_ctx.swus = swus;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
}
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
@@ -6587,14 +7382,16 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -6612,7 +7409,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -6629,10 +7426,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- if (ring && ring->funcs->emit_hdp_flush)
+ if (ring && ring->funcs->emit_hdp_flush) {
amdgpu_ring_emit_hdp_flush(ring);
- else
- amdgpu_asic_flush_hdp(adev, ring);
+ return;
+ }
+
+ if (!ring && amdgpu_sriov_runtime(adev)) {
+ if (!amdgpu_kiq_hdp_flush(adev))
+ return;
+ }
+
+ amdgpu_hdp_flush(adev, ring);
}
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
@@ -6645,7 +7449,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- amdgpu_asic_invalidate_hdp(adev, ring);
+ amdgpu_hdp_invalidate(adev, ring);
}
int amdgpu_in_reset(struct amdgpu_device *adev)
@@ -6756,22 +7560,117 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
{
struct dma_fence *old = NULL;
+ dma_fence_get(gang);
do {
dma_fence_put(old);
old = amdgpu_device_get_gang(adev);
if (old == gang)
break;
- if (!dma_fence_is_signaled(old))
+ if (!dma_fence_is_signaled(old)) {
+ dma_fence_put(gang);
return old;
+ }
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
old, gang) != old);
+ /*
+ * Drop it once for the exchanged reference in adev and once for the
+ * thread local reference acquired in amdgpu_device_get_gang().
+ */
+ dma_fence_put(old);
dma_fence_put(old);
return NULL;
}
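
With the extra dma_fence_get()/put() pairs, the gang pointer exchange now holds its own reference on the new gang across the cmpxchg loop. The sketch below models the same protocol in userspace with C11 atomics and a plain refcount standing in for dma_fence; it is illustrative only, not the driver's locking.

#include <stdatomic.h>
#include <stdio.h>

struct fence { int refs; int signaled; };

static void get(struct fence *f) { if (f) f->refs++; }
static void put(struct fence *f) { if (f) f->refs--; }

static _Atomic(struct fence *) gang_submit;

/* returns the old gang the caller must wait on, or NULL on success */
static struct fence *switch_gang(struct fence *gang)
{
	struct fence *old;

	get(gang);	/* reference that will live in gang_submit */
	do {
		old = atomic_load(&gang_submit);
		if (old == gang)
			break;
		if (old && !old->signaled) {
			put(gang);
			return old;	/* caller must wait for the old gang first */
		}
	} while (!atomic_compare_exchange_strong(&gang_submit, &old, gang));

	put(old);	/* reference previously held by gang_submit */
	return NULL;
}

int main(void)
{
	struct fence a = { .refs = 1, .signaled = 1 }, b = { .refs = 1 };

	atomic_store(&gang_submit, &a);
	printf("switch: %s\n", switch_gang(&b) ? "must wait" : "done");
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);
	return 0;
}
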
+/**
+ * amdgpu_device_enforce_isolation - enforce HW isolation
+ * @adev: the amdgpu device pointer
+ * @ring: the HW ring the job is supposed to run on
+ * @job: the job which is about to be pushed to the HW ring
+ *
+ * Makes sure that only one client at a time can use the GFX block.
+ * Returns: The dependency to wait on before the job can be pushed to the HW.
+ * The function is called multiple times until NULL is returned.
+ */
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ struct drm_sched_fence *f = job->base.s_fence;
+ struct dma_fence *dep;
+ void *owner;
+ int r;
+
+ /*
+ * For now enforce isolation only for the GFX block since we only need
+ * the cleaner shader on those rings.
+ */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
+ ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return NULL;
+
+ /*
+ * All submissions where enforce isolation is false are handled as if
+	 * they come from a single client. Use ~0l as the owner to distinguish it
+ * from kernel submissions where the owner is NULL.
+ */
+ owner = job->enforce_isolation ? f->owner : (void *)~0l;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ /*
+ * The "spearhead" submission is the first one which changes the
+ * ownership to its client. We always need to wait for it to be
+ * pushed to the HW before proceeding with anything.
+ */
+ if (&f->scheduled != isolation->spearhead &&
+ !dma_fence_is_signaled(isolation->spearhead)) {
+ dep = isolation->spearhead;
+ goto out_grab_ref;
+ }
+
+ if (isolation->owner != owner) {
+
+ /*
+ * Wait for any gang to be assembled before switching to a
+	 * different owner, otherwise we could deadlock the
+ * submissions.
+ */
+ if (!job->gang_submit) {
+ dep = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(dep))
+ goto out_return_dep;
+ dma_fence_put(dep);
+ }
+
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(&f->scheduled);
+ amdgpu_sync_move(&isolation->active, &isolation->prev);
+ trace_amdgpu_isolation(isolation->owner, owner);
+ isolation->owner = owner;
+ }
+
+ /*
+ * Specifying the ring here helps to pipeline submissions even when
+ * isolation is enabled. If that is not desired for testing NULL can be
+ * used instead of the ring to enforce a CPU round trip while switching
+ * between clients.
+ */
+ dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
+ r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
+ if (r)
+ dev_warn(adev->dev, "OOM tracking isolation\n");
+
+out_grab_ref:
+ dma_fence_get(dep);
+out_return_dep:
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ return dep;
+}
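
As a rough mental model of the ownership rule (ignoring the fence and amdgpu_sync bookkeeping): the first job of a new owner, the spearhead, must wait for the previous owner's work, and later jobs of the same owner queue behind that spearhead. The toy below encodes only that ordering; the names and integer job ids are hypothetical.

#include <stdio.h>

struct isolation {
	const void *owner;	/* current owner, NULL for kernel work */
	int spearhead;		/* job that performed the last ownership switch */
	int last;		/* last job submitted by the current owner */
};

/* returns the job id the new submission has to wait for, 0 for none */
static int enforce_isolation(struct isolation *iso, const void *owner, int job_id)
{
	int dep;

	if (iso->owner != owner) {
		/* new client: its spearhead waits for the old client's last job */
		dep = iso->last;
		iso->spearhead = job_id;
		iso->owner = owner;
	} else {
		/* same client: just queue behind the spearhead of this epoch */
		dep = iso->spearhead;
	}
	iso->last = job_id;
	return dep;
}

int main(void)
{
	struct isolation iso = { 0 };
	int a, b;	/* distinct addresses stand in for two clients */

	printf("job 10 (A) waits on job %d\n", enforce_isolation(&iso, &a, 10));
	printf("job 11 (A) waits on job %d\n", enforce_isolation(&iso, &a, 11));
	printf("job 12 (B) waits on job %d\n", enforce_isolation(&iso, &b, 12));
	return 0;
}
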
+
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -6831,9 +7730,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(
+ adev->dev,
+				"Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
@@ -6884,3 +7785,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
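
Both UID helpers reduce to a bounds-checked 2-D table keyed by type and instance, with a warning when a slot is overwritten. A standalone sketch with illustrative sizes and names:

#include <stdio.h>
#include <stdint.h>

#define UID_TYPE_MAX 4
#define UID_INST_MAX 8

struct uid_info { uint64_t uid[UID_TYPE_MAX][UID_INST_MAX]; };

static void uid_set(struct uid_info *info, unsigned type, unsigned inst, uint64_t uid)
{
	if (type >= UID_TYPE_MAX || inst >= UID_INST_MAX) {
		fprintf(stderr, "invalid UID index %u/%u\n", type, inst);
		return;
	}
	if (info->uid[type][inst])
		fprintf(stderr, "overwriting UID %llu for %u/%u\n",
			(unsigned long long)info->uid[type][inst], type, inst);
	info->uid[type][inst] = uid;
}

static uint64_t uid_get(const struct uid_info *info, unsigned type, unsigned inst)
{
	if (type >= UID_TYPE_MAX || inst >= UID_INST_MAX)
		return 0;
	return info->uid[type][inst];
}

int main(void)
{
	struct uid_info info = { 0 };

	uid_set(&info, 1, 0, 0xdeadbeefULL);
	uid_set(&info, 1, 0, 0xcafef00dULL);	/* triggers the overwrite warning */
	printf("uid = 0x%llx\n", (unsigned long long)uid_get(&info, 1, 0));
	return 0;
}
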
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 949d74eff294..fa2a22dfa048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -107,14 +107,22 @@
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
-MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -247,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
if (discv_regn) {
- memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memcpy(binary, discv_regn, adev->discovery.size);
memunmap(discv_regn);
return 0;
}
@@ -263,12 +271,13 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
+ bool sz_valid = true;
uint64_t vram_size;
- u32 msg;
int i, ret = 0;
+ u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -276,7 +285,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -284,38 +293,62 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
}
- vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ sz_valid = false;
+ else
+ vram_size <<= 20;
+
+ /*
+ * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
+ * then it is not required to be reserved.
+ */
+ if (sz_valid) {
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ /* For SRIOV VFs with dynamic critical region enabled,
+			 * we will get the IPD binary via the call below.
+			 * If dynamic critical region is disabled, fall through to the normal sequence.
+ */
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+ &adev->discovery.size)) {
+ dev_err(adev->dev,
+ "failed to read discovery info from dynamic critical region.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- if (vram_size) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery.size, false);
+ adev->discovery.reserve_tmr = true;
+ }
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
+ if (ret)
+ dev_err(adev->dev,
+ "failed to read discovery info from memory, vram size read: %llx",
+ vram_size);
+exit:
return ret;
}
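
Both this IFWI wait and the vendor-ID poll in amdgpu_pci_slot_reset() follow the same bounded-poll shape: read a status source, sleep briefly, and give up once the time budget is spent. A userspace sketch of that loop, where read_status() is a stand-in for the register or config-space read:

#include <stdio.h>
#include <unistd.h>

static int calls;

static unsigned int read_status(void)
{
	/* pretend the hardware becomes ready on the 5th read */
	return (++calls >= 5) ? 0x80000000u : 0;
}

static int wait_for_ready(int budget_ms, int step_ms)
{
	while (budget_ms > 0) {
		if (read_status() & 0x80000000u)
			return 0;
		usleep(step_ms * 1000);
		budget_ms -= step_ms;
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("wait: %s after %d reads\n",
	       wait_for_ready(100, 10) ? "timeout" : "ready", calls);
	return 0;
}
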
-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
- const char *fw_name;
int r;
- switch (amdgpu_discovery) {
- case 2:
- fw_name = FIRMWARE_IP_DISCOVERY;
- break;
- default:
- dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
- return -EINVAL;
- }
-
- r = request_firmware(&fw, fw_name, adev->dev);
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
- dev_err(adev->dev, "can't load firmware \"%s\"\n",
- fw_name);
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@@ -378,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
struct binary_header *bhdr)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct table_info *info;
uint16_t checksum;
uint16_t offset;
@@ -387,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
checksum = le16_to_cpu(info->checksum);
struct nps_info_header *nhdr =
- (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+ (struct nps_info_header *)(discovery_bin + offset);
if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
return -EINVAL;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_dbg(adev->dev, "invalid nps info data table checksum\n");
@@ -404,55 +438,88 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
return 0;
}
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+{
+ if (amdgpu_discovery == 2) {
+ /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */
+ adev->discovery.reserve_tmr = true;
+ return "amdgpu/ip_discovery.bin";
+ }
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "amdgpu/vega10_ip_discovery.bin";
+ case CHIP_VEGA12:
+ return "amdgpu/vega12_ip_discovery.bin";
+ case CHIP_RAVEN:
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "amdgpu/raven2_ip_discovery.bin";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "amdgpu/picasso_ip_discovery.bin";
+ else
+ return "amdgpu/raven_ip_discovery.bin";
+ case CHIP_VEGA20:
+ return "amdgpu/vega20_ip_discovery.bin";
+ case CHIP_ARCTURUS:
+ return "amdgpu/arcturus_ip_discovery.bin";
+ case CHIP_ALDEBARAN:
+ return "amdgpu/aldebaran_ip_discovery.bin";
+ default:
+ return NULL;
+ }
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ uint8_t *discovery_bin;
+ const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
- if (!adev->mman.discovery_bin)
+ adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery.bin)
return -ENOMEM;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+ discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
- if (amdgpu_discovery == 2) {
- dev_info(adev->dev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
-
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from file\n");
- r = -EINVAL;
+ fw_name = amdgpu_discovery_get_fw_name(adev);
+ if (fw_name != NULL) {
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
+ fw_name);
+ if (r)
goto out;
- }
-
} else {
- r = amdgpu_discovery_read_binary_from_mem(
- adev, adev->mman.discovery_bin);
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- size, checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
@@ -464,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ (struct ip_discovery_header *)(discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le16_to_cpu(ihdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le16_to_cpu(ihdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -485,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+ (struct gpu_info_header *)(discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
@@ -493,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(ghdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(ghdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -507,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+ (struct harvest_info_header *)(discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
@@ -515,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
@@ -529,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+ (struct vcn_info_header *)(discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
@@ -537,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
@@ -551,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (0 && offset) {
struct mall_info_header *mhdr =
- (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+ (struct mall_info_header *)(discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
@@ -559,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(
+ discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
@@ -570,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
if ((amdgpu_discovery != 2) &&
(RREG32(mmIP_DISCOVERY_VERSION) == 4))
amdgpu_ras_query_boot_status(adev, 4);
@@ -583,20 +655,23 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
}
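
Every table in the discovery blob is validated with amdgpu_discovery_verify_checksum() against the 16-bit value stored in its header. Assuming the check is a plain byte sum over the table (an assumption; the kernel helper is the authority), a standalone sketch of the idea looks like this:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

static bool verify_checksum(const uint8_t *data, size_t size, uint16_t expected)
{
	uint16_t sum = 0;
	size_t i;

	/* accumulate every byte of the table and compare with the header value */
	for (i = 0; i < size; i++)
		sum += data[i];
	return sum == expected;
}

int main(void)
{
	uint8_t table[] = { 0x10, 0x20, 0x30, 0x40 };

	printf("checksum ok: %d\n", verify_checksum(table, sizeof(table), 0xa0));
	return 0;
}
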
-static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
+ uint8_t instance, uint16_t hw_id)
{
- if (ip->instance_number >= HWIP_MAX_INSTANCE) {
- DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
- ip->instance_number);
+ if (instance >= HWIP_MAX_INSTANCE) {
+ dev_err(adev->dev,
+ "Unexpected instance_number (%d) from ip discovery blob\n",
+ instance);
return -EINVAL;
}
- if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
- DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
- le16_to_cpu(ip->hw_id));
+ if (hw_id >= HW_ID_MAX) {
+ dev_err(adev->dev,
+ "Unexpected hw_id (%d) from ip discovery blob\n",
+ hw_id);
return -EINVAL;
}
@@ -606,36 +681,41 @@ static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct ip_v4 *ip;
+ struct ip *ip;
uint16_t die_offset, ip_offset, num_dies, num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int i, j;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
-
- if (amdgpu_discovery_validate_ip(ip))
+ ip = (struct ip *)(discovery_bin + ip_offset);
+ inst = ip->number_instance;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
- if (le16_to_cpu(ip->variant) == 1) {
- switch (le16_to_cpu(ip->hw_id)) {
+ if (ip->harvest == 1) {
+ switch (hw_id) {
case VCN_HWID:
(*vcn_harvest_count)++;
- if (ip->instance_number == 0) {
+ if (inst == 0) {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
adev->vcn.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN0;
@@ -657,10 +737,8 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
}
}
next_ip:
- if (ihdr->base_addr_64_bit)
- ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
- else
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
}
}
}
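
The harvest scan walks each die's packed IP records by advancing the offset by the size of every record, including its flexible base-address array (struct_size() in the kernel). A simplified host-endian sketch of that walk with a hypothetical record layout:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct ip_rec {
	uint16_t hw_id;
	uint8_t  instance;
	uint8_t  num_base_address;
	uint32_t base_address[];	/* flexible array, like struct ip */
};

int main(void)
{
	uint32_t storage[16] = { 0 };	/* keeps the blob 4-byte aligned */
	uint8_t *blob = (uint8_t *)storage;
	size_t off = 0;
	int i;

	/* build two packed records: one with 2 base addresses, one with 1 */
	struct ip_rec a = { .hw_id = 1, .instance = 0, .num_base_address = 2 };
	uint32_t a_bases[2] = { 0x1000, 0x2000 };
	struct ip_rec b = { .hw_id = 2, .instance = 0, .num_base_address = 1 };
	uint32_t b_bases[1] = { 0x3000 };

	memcpy(blob + off, &a, sizeof(a)); off += sizeof(a);
	memcpy(blob + off, a_bases, sizeof(a_bases)); off += sizeof(a_bases);
	memcpy(blob + off, &b, sizeof(b)); off += sizeof(b);
	memcpy(blob + off, b_bases, sizeof(b_bases)); off += sizeof(b_bases);

	/* walk the packed records the same way the discovery parser does */
	for (off = 0, i = 0; i < 2; i++) {
		const struct ip_rec *ip = (const struct ip_rec *)(blob + off);

		printf("hw_id %u, %u base address(es)\n",
		       ip->hw_id, ip->num_base_address);
		off += sizeof(*ip) + ip->num_base_address * sizeof(uint32_t);
	}
	return 0;
}
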
@@ -669,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
@@ -683,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
return;
}
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+ harvest_info = (struct harvest_table *)(discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
@@ -979,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj)
kobj);
struct amdgpu_device *adev = ip_top->adev;
- adev->ip_top = NULL;
kfree(ip_top);
+ adev->discovery.ip_top = NULL;
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
@@ -991,7 +1070,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
/* Until a uniform way is figured, get mask based on hwid */
switch (hw_id) {
case VCN_HWID:
- harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ /* VCN vs UVD+VCE */
+ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
@@ -1018,7 +1099,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
int ii, jj, kk, res;
+ uint16_t hw_id;
+ uint8_t inst;
DRM_DEBUG("num_ips:%d", num_ips);
@@ -1033,9 +1117,11 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip) ||
- le16_to_cpu(ip->hw_id) != ii)
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
+ hw_id != ii)
goto next_ip;
DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
@@ -1118,17 +1204,20 @@ next_ip:
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct kset *die_kset = &adev->ip_top->die_kset;
+ struct kset *die_kset = &ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1137,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1171,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_top *ip_top;
struct kset *die_kset;
int res, ii;
- if (!adev->mman.discovery_bin)
+ if (!discovery_bin)
return -EINVAL;
- adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
- if (!adev->ip_top)
+ ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
+ if (!ip_top)
return -ENOMEM;
- adev->ip_top->adev = adev;
-
- res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ ip_top->adev = adev;
+ adev->discovery.ip_top = ip_top;
+ res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
- die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.parent = &ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
- res = kset_register(&adev->ip_top->die_kset);
+ res = kset_register(&ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
@@ -1208,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
return res;
Err:
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->kobj);
return res;
}
@@ -1253,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
struct list_head *el, *tmp;
struct kset *die_kset;
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
@@ -1265,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
- kobject_put(&adev->ip_top->die_kset.kobj);
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->die_kset.kobj);
+ kobject_put(&ip_top->kobj);
}
/* ================================================== */
@@ -1277,35 +1369,39 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
+ uint8_t *discovery_bin;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
uint16_t num_dies;
+ uint32_t wafl_ver;
uint16_t num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int hw_ip;
int i, j, k;
int r;
r = amdgpu_discovery_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_discovery_init failed\n");
+ if (r)
return r;
- }
-
+ discovery_bin = adev->discovery.bin;
+ wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header
+ *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1319,9 +1415,11 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip))
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
num_base_address = ip->num_base_address;
@@ -1390,6 +1488,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->gfx.xcc_mask |=
(1U << ip->instance_number);
+ if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
+ wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
+ ip->revision, 0, 0);
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@@ -1455,22 +1557,32 @@ next_ip:
}
}
+ if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
+ adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
+
return 0;
}
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_header *ihdr;
+ struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
+ uint16_t offset, ihdr_ver;
+ bhdr = (struct binary_header *)discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
+ ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
+ ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
* so read harvest bit per IP data structure to set
* harvest configuration.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
+ ihdr_ver <= 2) {
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 ||
adev->pdev->revision == 0xC7)) ||
@@ -1509,22 +1621,23 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
- gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+ gc_info = (union gc_info *)(discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
@@ -1617,24 +1730,25 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
- mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+ mall_info = (union mall_info *)(discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
@@ -1673,12 +1787,13 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
@@ -1693,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
- vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+ vcn_info = (union vcn_info *)(discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
@@ -1759,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
@@ -1775,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return r;
nps_info = &nps_data;
} else {
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
dev_err(adev->dev,
"fetch mem range failed, ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
if (!offset)
@@ -1791,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
return -ENOENT;
- nps_info =
- (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info = (union nps_info *)(discovery_bin + offset);
}
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
@@ -1864,6 +1979,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1919,6 +2035,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1998,6 +2115,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
break;
case IP_VERSION(11, 0, 8):
@@ -2029,6 +2147,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
break;
default:
@@ -2056,13 +2175,17 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2079,6 +2202,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
@@ -2086,6 +2210,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2137,6 +2262,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 1, 0):
/* TODO: Fix IP version. DC code expects version 4.0.1 */
if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
@@ -2215,6 +2341,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -2270,6 +2397,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
case IP_VERSION(7, 0, 0):
@@ -2282,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_version(adev, SDMA0_HWIP, 0));
return -EINVAL;
}
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2393,6 +2536,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2480,9 +2624,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@@ -2502,9 +2653,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@@ -2524,10 +2682,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
+ adev->sdma.sdma_mask = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
+ adev->gfx.xcc_mask = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@@ -2565,9 +2730,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@@ -2588,10 +2760,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
+ adev->sdma.sdma_mask = 0xff;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@@ -2616,10 +2795,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
+ adev->sdma.sdma_mask = 0x1f;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@@ -2641,10 +2827,44 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
+ adev->gfx.xcc_mask = 1;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
@@ -2708,6 +2928,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->family = AMDGPU_FAMILY_GC_11_5_0;
break;
case IP_VERSION(12, 0, 0):
@@ -2733,19 +2954,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->flags |= AMD_IS_APU;
break;
default:
break;
}
- if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
- adev->gmc.xgmi.supported = true;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
-
/* set NBIO version */
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 1, 0):
@@ -2766,11 +2981,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
adev->nbio.funcs = &nbio_v7_9_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
case IP_VERSION(7, 11, 3):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
@@ -2898,6 +3115,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 10):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 9):
case IP_VERSION(13, 0, 10):
@@ -2907,6 +3125,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 11):
adev->smuio.funcs = &smuio_v13_0_3_funcs;
if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
adev->flags |= AMD_IS_APU;
@@ -2986,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_ras_ip_blocks(adev);
+ if (r)
+ return r;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index b44d56465c5b..4ce04486cc31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,9 +24,21 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#include <linux/debugfs.h>
+
#define DISCOVERY_TMR_SIZE (10 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
+struct ip_discovery_top;
+
+struct amdgpu_discovery_info {
+ struct debugfs_blob_wrapper debugfs_blob;
+ struct ip_discovery_top *ip_top;
+ uint32_t size;
+ uint8_t *bin;
+ bool reserve_tmr;
+};
+
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
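
The new struct amdgpu_discovery_info bundles the raw discovery binary with a debugfs_blob_wrapper. The hunk above only adds the structure; as a rough sketch of how such a wrapper is typically exposed (the helper name and the "ip_discovery" file name below are illustrative, not taken from this patch), the discovery code would point the wrapper at the binary and register it with debugfs_create_blob():

  /*
   * Illustrative sketch only: how a debugfs_blob_wrapper is usually wired up.
   * Helper name and file name are hypothetical; assumes the amdgpu driver
   * context (struct amdgpu_device from amdgpu.h).
   */
  #include <linux/debugfs.h>
  #include "amdgpu.h"

  static void amdgpu_discovery_debugfs_init_sketch(struct amdgpu_device *adev,
                                                   struct dentry *root)
  {
          struct amdgpu_discovery_info *disc = &adev->discovery;

          /* Point the wrapper at the in-memory discovery binary ... */
          disc->debugfs_blob.data = disc->bin;
          disc->debugfs_blob.size = disc->size;

          /* ... and expose it as a read-only blob file under debugfs. */
          debugfs_create_blob("ip_discovery", 0444, root, &disc->debugfs_blob);
  }
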
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 35c778426a7c..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
@@ -1196,13 +1194,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1297,6 +1296,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1317,7 +1317,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1330,7 +1330,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
@@ -1363,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property lets the compositor directly
+ * control the adaptive backlight modulation power-savings feature that is
+ * part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property defaults to 'sysfs', which lets the sysfs file
+ * 'panel_power_savings' control it.
+ * If set to 'off' the compositor will ensure it stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' will control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+ const struct drm_prop_enum_list props[] = {
+ { ABM_SYSFS_CONTROL, "sysfs" },
+ { ABM_LEVEL_OFF, "off" },
+ { ABM_LEVEL_MIN, "min" },
+ { ABM_LEVEL_BIAS_MIN, "bias min" },
+ { ABM_LEVEL_BIAS_MAX, "bias max" },
+ { ABM_LEVEL_MAX, "max" },
+ };
+ struct drm_property *prop;
+ int i;
+
+ if (!adev->dc_enabled)
+ return 0;
+
+ prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+ "adaptive backlight modulation",
+ 6);
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(adev_to_drm(adev), prop);
+
+ return ret;
+ }
+ }
+
+ adev->mode_info.abm_level_property = prop;
+
+ return 0;
+}
+
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -1409,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- return 0;
+ return amdgpu_display_setup_abm_prop(adev);
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dfa0d642ac16..49a29bf47a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
@@ -54,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb);
+#define ABM_SYSFS_CONTROL -1
+#define ABM_LEVEL_OFF 0
+#define ABM_LEVEL_MIN 1
+#define ABM_LEVEL_BIAS_MIN 2
+#define ABM_LEVEL_BIAS_MAX 3
+#define ABM_LEVEL_MAX 4
+
#endif
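
The 'adaptive backlight modulation' connector property added above is meant to be driven by the compositor, with the level defines from amdgpu_display.h as its enum values. Below is a minimal userspace sketch using libdrm; the fd and connector id are assumed to come from the compositor's existing DRM setup, and looking the property and enum value up by name is a common pattern rather than something this patch mandates:

  /*
   * Sketch: set the "adaptive backlight modulation" property to "off" on a
   * connector. Error handling is abbreviated.
   */
  #include <stdint.h>
  #include <string.h>
  #include <xf86drm.h>
  #include <xf86drmMode.h>

  static int set_abm_off(int fd, uint32_t connector_id)
  {
          drmModeObjectProperties *props;
          uint32_t i;
          int ret = -1;

          props = drmModeObjectGetProperties(fd, connector_id,
                                             DRM_MODE_OBJECT_CONNECTOR);
          if (!props)
                  return -1;

          for (i = 0; i < props->count_props; i++) {
                  drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

                  if (prop && !strcmp(prop->name, "adaptive backlight modulation")) {
                          int j;

                          /* Look the enum value up by name instead of hardcoding it. */
                          for (j = 0; j < prop->count_enums; j++) {
                                  if (!strcmp(prop->enums[j].name, "off")) {
                                          ret = drmModeObjectSetProperty(fd, connector_id,
                                                          DRM_MODE_OBJECT_CONNECTOR,
                                                          prop->prop_id,
                                                          prop->enums[j].value);
                                          break;
                                  }
                          }
                  }
                  drmModeFreeProperty(prop);
          }
          drmModeFreeObjectProperties(props);
          return ret;
  }
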
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9f627caedc3f..e22cfa7c6d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -43,6 +43,29 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
+
+/**
+ * dma_buf_attach_adev - Helper to get adev of an attachment
+ *
+ * @attach: attachment
+ *
+ * Returns:
+ * A struct amdgpu_device * if the attaching device is an amdgpu device or
+ * partition, NULL otherwise.
+ */
+static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
+{
+ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ return amdgpu_ttm_adev(bo->tbo.bdev);
+ }
+
+ return NULL;
+}
+
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@@ -54,15 +77,48 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
+ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P due to device-local
+ * compression metadata. Fall back to the system-memory path instead when:
+ * - the device supports GFX12 (GC 12.x or newer), and
+ * - the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
attach->peer2peer = false;
+
+ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
+ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ attach->peer2peer = false;
+
+ r = dma_resv_lock(bo->tbo.base.resv, NULL);
+ if (r)
+ return r;
+
amdgpu_vm_bo_update_shared(bo);
+ dma_resv_unlock(bo->tbo.base.resv);
+
return 0;
}
@@ -75,11 +131,35 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->allowed_domains;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P just pin into GTT instead.
+ *
+ * To avoid conflicting pinnings between GPUs and RDMA when move
+ * notifiers are disabled, only allow pinning in VRAM when move
+ * notifiers are enabled.
+ */
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ }
+
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- /* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (WARN_ON(!domains))
+ return -EINVAL;
+
+ return amdgpu_bo_pin(bo, domains);
}
/**
@@ -134,9 +214,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
-
- } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
- return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@@ -153,6 +230,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
+ /* XGMI-accessible memory should never be DMA-mapped */
+ if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
+ dma_buf_attach_adev(attach), bo)))
+ return ERR_PTR(-EINVAL);
+
r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
bo->tbo.base.size, attach->dev,
dir, &sgt);
@@ -184,7 +266,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- if (sgt->sgl->page_link) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -234,6 +316,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mappable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -243,8 +355,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
/**
@@ -262,11 +374,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
@@ -459,7 +583,10 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct drm_gem_object *obj = &bo->tbo.base;
struct drm_gem_object *gobj;
- if (obj->import_attach) {
+ if (!adev)
+ return false;
+
+ if (drm_gem_is_imported(obj)) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
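
With the vmap/vunmap changes above, amdgpu now pins the BO for as long as a CPU mapping exists, so importers using the generic dma-buf vmap interface get a stable mapping with no extra bookkeeping. A minimal sketch of a hypothetical in-kernel importer follows, assuming the unlocked dma-buf vmap helpers available in recent kernels:

  /*
   * Sketch of a hypothetical in-kernel importer CPU-accessing an exported
   * amdgpu buffer. dma_buf_vmap_unlocked() ends up in amdgpu_dma_buf_vmap(),
   * which now keeps the BO pinned for the lifetime of the mapping.
   */
  #include <linux/dma-buf.h>
  #include <linux/io.h>
  #include <linux/iosys-map.h>

  static int example_peek_first_dword(struct dma_buf *buf, u32 *out)
  {
          struct iosys_map map;
          int ret;

          ret = dma_buf_vmap_unlocked(buf, &map);
          if (ret)
                  return ret;

          /* Handle both system-memory and I/O mappings. */
          *out = map.is_iomem ? readl(map.vaddr_iomem) : *(u32 *)map.vaddr;

          dma_buf_vunmap_unlocked(buf, &map);
          return 0;
  }
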
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 492b09d84571..2dfbddcef9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -51,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -119,9 +121,14 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+ * - 3.61.0 - Contains fix for RV/PCO compute queues
+ * - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
+ * - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -134,6 +141,11 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
+ AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
+ AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9),
+ AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -170,7 +182,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
+int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
* migration and restoration of backing memory when handling
@@ -231,6 +244,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -280,7 +295,8 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
- * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
+ * The default is -1 (Use value specified by TTM).
+ * This parameter is deprecated and will be removed in the future.
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -296,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600);
* DOC: audio (int)
* Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it.
*/
-MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
/**
@@ -338,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
- * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
- * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
+ * The format can be [single value] for setting all timeouts at once or
+ * [GFX,Compute,SDMA,Video] to set individual timeouts.
+ * Negative values mean infinity.
*
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- * The second one is for Compute. The third and fourth ones are
- * for SDMA and Video.
- *
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout setting), the timeout for all queues is 2000 ms.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
-module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout,
+ sizeof(amdgpu_lockup_timeout), 0444);
/**
* DOC: dpm (int)
@@ -399,7 +409,7 @@ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
* the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
*/
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
/**
* DOC: bapm (int)
@@ -457,7 +467,7 @@ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
* Enable experimental hw support (1 = enable). The default is 0 (disabled).
*/
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
* DOC: dc (int)
@@ -568,14 +578,14 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
* DOC: emu_mode (int)
* Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
*/
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
-module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
/**
* DOC: ras_enable (int)
@@ -608,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
* DOC: si_support (int)
- * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * SI (Southern Islands) are first generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support SI.
+ * By default, SI dedicated GPUs are supported by amdgpu.
+ * Only relevant when CONFIG_DRM_AMDGPU_SI is enabled to build SI support in amdgpu.
+ * See also radeon.si_support which should be disabled when amdgpu.si_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_si_support = -1;
#ifdef CONFIG_DRM_AMDGPU_SI
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_si_support = 1;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
/**
* DOC: cik_support (int)
- * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * CIK (Sea Islands) are second generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support CIK.
+ * By default:
+ * - CIK dedicated GPUs are supported by amdgpu.
+ * - CIK APUs are supported by radeon (except when radeon is not built).
+ * Only relevant when CONFIG_DRM_AMDGPU_CIK is enabled to build CIK support in amdgpu.
+ * See also radeon.cik_support which should be disabled when amdgpu.cik_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_cik_support = -1;
#ifdef CONFIG_DRM_AMDGPU_CIK
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_cik_support = 1;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
@@ -730,7 +740,7 @@ module_param_named(noretry, amdgpu_noretry, int, 0644);
*/
MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
/**
* DOC: use_xgmi_p2p (int)
@@ -749,7 +759,7 @@ module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
* assigns queues to HQDs.
*/
int sched_policy = KFD_SCHED_POLICY_HWS;
-module_param(sched_policy, int, 0444);
+module_param_unsafe(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
@@ -799,7 +809,7 @@ MODULE_PARM_DESC(send_sigterm,
* Setting 1 enables halt on hang.
*/
int halt_if_hws_hang;
-module_param(halt_if_hws_hang, int, 0644);
+module_param_unsafe(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
@@ -808,7 +818,7 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
* check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
-module_param(hws_gws_support, bool, 0444);
+module_param_unsafe(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
@@ -841,7 +851,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
*/
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
-module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -849,7 +859,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
*/
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
-module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
/**
* DOC: pcie_p2p (bool)
@@ -871,7 +881,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -945,7 +955,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -953,7 +963,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0644);
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold specifies the
* threshold of faulty pages detected by RAS ECC, which may
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = threshold determined by a formula, 0 < threshold < max records, user-defined threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -1025,11 +1035,20 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
+
+/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
+ */
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
/**
* DOC: seamless (int)
@@ -1047,9 +1066,14 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* limits the VRAM size reported to ROCm applications to the visible
* size, usually 256MB.
* - 0x4: Disable GPU soft recovery, always do a full reset
+ * - 0x8: Use VRAM for firmware loading
+ * - 0x10: Enable ACA based RAS logging
+ * - 0x20: Enable experimental resets
+ * - 0x40: Disable ring resets
+ * - 0x80: Use VRAM for SMU pool
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
-module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
@@ -1076,6 +1100,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -1789,7 +1835,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
};
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1862,8 +1907,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@@ -1946,7 +1989,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
-#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -2125,6 +2167,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2181,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
adev->pdev->bus->number, i);
if (p) {
pm_runtime_get_sync(&p->dev);
- pm_runtime_mark_last_busy(&p->dev);
pm_runtime_put_autosuspend(&p->dev);
pci_dev_put(p);
}
@@ -2219,6 +2265,29 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable experimental reset features\n");
adev->debug_exp_resets = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET) {
+ pr_info("debug: ring reset disabled\n");
+ adev->debug_disable_gpu_ring_reset = true;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_SMU_POOL) {
+ pr_info("debug: use vram for smu pool\n");
+ adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) {
+ pr_info("debug: allowing command submission to CE engine\n");
+ adev->debug_enable_ce_cs = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2237,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl
return flags;
}
+static bool amdgpu_support_enabled(struct device *dev,
+ const enum amd_asic_type family)
+{
+ const char *gen;
+ const char *param;
+ int module_param = -1;
+ bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON);
+ bool amdgpu_support_built = false;
+ bool support_by_default = false;
+
+ switch (family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ gen = "SI";
+ param = "si_support";
+ module_param = amdgpu_si_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI);
+ support_by_default = true;
+ break;
+
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ support_by_default = true;
+ fallthrough;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ gen = "CIK";
+ param = "cik_support";
+ module_param = amdgpu_cik_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK);
+ break;
+
+ default:
+ /* All other chips are supported by amdgpu only */
+ return true;
+ }
+
+ if (!amdgpu_support_built) {
+ dev_info(dev, "amdgpu built without %s support\n", gen);
+ return false;
+ }
+
+ if ((module_param == -1 && (support_by_default || !radeon_support_built)) ||
+ module_param == 1) {
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by amdgpu.\n"
+ "Use radeon.%s=1 amdgpu.%s=0 to override.\n",
+ gen, param, param);
+
+ return true;
+ }
+
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by radeon.\n"
+ "Use radeon.%s=0 amdgpu.%s=1 to override.\n",
+ gen, param, param);
+ else if (module_param == 0)
+ dev_info(dev, "%s support disabled by module param\n", gen);
+
+ return false;
+}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2246,6 +2381,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
int ret, retry = 0, i;
bool supports_atomic = false;
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+ return -EINVAL;
+ }
+
/* skip devices which are owned by radeon */
for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
if (amdgpu_unsupported_pciidlist[i] == pdev->device)
@@ -2256,7 +2397,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2278,40 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(&pdev->dev,
- "SI support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(&pdev->dev,
- "CIK support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
+ if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK))
+ return -ENODEV;
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
@@ -2378,10 +2487,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2390,7 +2499,6 @@ retry_init:
pm_runtime_allow(ddev->dev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
@@ -2414,9 +2522,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2433,6 +2541,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -2462,6 +2571,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+ /* device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2469,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- amdgpu_device_ip_suspend(adev);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -2478,11 +2592,14 @@ static int amdgpu_pmops_prepare(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ /* the device may not be resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2497,7 +2614,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2509,8 +2626,24 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* don't allow going deep first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
+#endif
return 0;
+ }
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+#endif
+
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2518,9 +2651,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
@@ -2553,7 +2691,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
int r;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2566,12 +2703,21 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ /* do not resume the device if this is a normal hibernation */
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ return 0;
+
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not be resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2650,6 +2796,15 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2665,6 +2820,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2718,7 +2876,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2759,7 +2917,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -2786,12 +2944,32 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
- pm_runtime_mark_last_busy(dev);
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
+
+ if (fpriv && drm_dev_enter(dev, &idx)) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2806,22 +2984,21 @@ long amdgpu_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -2843,7 +3020,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2890,6 +3067,10 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -2963,7 +3144,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
@@ -2972,14 +3153,11 @@ static int __init amdgpu_init(void)
{
int r;
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
r = amdgpu_sync_init();
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
+ r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3013,7 +3191,7 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..23d7d0b0d625
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
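+
+/*
+ * Minimal caller sketch (illustrative only; 'fpriv' and 'bo' are assumed
+ * locals): the objects must be locked through drm_exec around the
+ * replacement, roughly as follows.
+ *
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ *	drm_exec_until_all_locked(&exec) {
+ *		ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (ret)
+ *			break;
+ *	}
+ *	if (!ret)
+ *		ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ *	drm_exec_fini(&exec);
+ */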
+
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_fence_get(&ev_fence->base);
+ else
+ goto unlock;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(&ev_fence->base);
+ return;
+
+unlock:
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done once per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
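+
+/*
+ * Expected per-open pairing (a sketch based on the file release path in
+ * this patch): amdgpu_eviction_fence_init() runs once when the file is
+ * opened, and on close the manager is flagged as closing before teardown:
+ *
+ *	fpriv->evf_mgr.fd_closing = true;
+ *	amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ */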
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 91d638098889..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2f24a6aa13bf..c7843e336310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,50 +42,14 @@
#include "amdgpu_reset.h"
/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
-/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
-static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops ||
- __f->base.ops == &amdgpu_job_fence_ops)
- return __f;
-
- return NULL;
+ return __f;
}
/**
@@ -129,57 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
- struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
- return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
- } else {
- /* take use of job-embedded fence */
- fence = &job->hw_fence;
- }
+ fence = &af->base;
+ af->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
- dma_fence_get(fence);
- } else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -204,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = fence;
-
return 0;
}
@@ -280,7 +217,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (del_timer(&ring->fence_drv.fallback_timer) &&
+ if (timer_delete(&ring->fence_drv.fallback_timer) &&
seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
@@ -292,6 +229,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -304,9 +242,14 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
@@ -322,11 +265,13 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
*/
static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = from_timer(ring, t,
- fence_drv.fallback_timer);
+ struct amdgpu_ring *ring = timer_container_of(ring, t,
+ fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -618,7 +563,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- del_timer_sync(&ring->fence_drv.fallback_timer);
+ timer_delete_sync(&ring->fence_drv.fallback_timer);
}
}
@@ -698,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
- *
- * @ring: fence of the ring to be cleared
- *
- */
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
-{
- int i;
- struct dma_fence *old, **ptr;
-
- for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
- ptr = &ring->fence_drv.fences[i];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && old->ops == &amdgpu_job_fence_ops) {
- struct amdgpu_job *job;
-
- /* For non-scheduler bad job, i.e. failed ib test, we need to signal
- * it right here or we won't be able to track them in fence_drv
- * and they will remain unsignaled during sa_bo free.
- */
- job = container_of(old, struct amdgpu_job, hw_fence);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
- dma_fence_signal(old);
- RCU_INIT_POINTER(*ptr, NULL);
- dma_fence_put(old);
- }
- }
-}
-
-/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
@@ -764,6 +679,120 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/*
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver will save the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue, the queue
+ * contents from the other contexts are re-emitted to the rings so that they can
+ * be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
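+/*
+ * A rough sketch of the resulting recovery flow (assumed ordering, for
+ * illustration; the engine-specific reset step is elided and 'af' stands
+ * for the guilty fence):
+ *
+ *	amdgpu_ring_backup_unprocessed_commands(ring, af);
+ *	... engine-specific queue reset ...
+ *	amdgpu_fence_driver_guilty_force_completion(af);
+ *	for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ *		amdgpu_ring_write(ring, ring->ring_backup[i]);
+ *	amdgpu_ring_commit(ring);
+ */
+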
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @af: fence of the ring to signal
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ struct amdgpu_ring *ring = af->ring;
+ unsigned long flags;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+
+ /* mark all fences from the guilty context with an error */
+ spin_lock_irqsave(&ring->fence_drv.lock, flags);
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ if (fence == af)
+ dma_fence_set_error(&fence->base, -ETIME);
+ else if (fence->context == af->context)
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+ spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
+ /* signal the guilty fence */
+ amdgpu_fence_write(ring, (u32)af->base.seqno);
+ amdgpu_fence_process(ring);
+}
+
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
+{
+ af->wptr = af->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+}
+
/*
* Common fence implementation
*/
@@ -778,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
return (const char *)to_amdgpu_fence(f)->ring->name;
}
-static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
-
- return (const char *)to_amdgpu_ring(job->base.sched)->name;
-}
-
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
@@ -802,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
}
/**
- * amdgpu_job_fence_enable_signaling - enable signalling on job fence
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_enable_signaling above, it
- * only handles the job embedded fence.
- */
-static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
-
- if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
-
- return true;
-}
-
-/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -830,22 +835,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free fence_slab if it's separated fence*/
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
-}
-
-/**
- * amdgpu_job_fence_free - free up the job with embedded fence
- *
- * @rcu: RCU callback head
- *
- * Free up the job with embedded fence after the RCU grace period.
- */
-static void amdgpu_job_fence_free(struct rcu_head *rcu)
-{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-
- /* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(to_amdgpu_fence(f));
}
/**
@@ -861,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
-/**
- * amdgpu_job_fence_release - callback that job embedded fence can be freed
- *
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_release above, it
- * only handles the job embedded fence.
- */
-static void amdgpu_job_fence_release(struct dma_fence *f)
-{
- call_rcu(&f->rcu, amdgpu_job_fence_free);
-}
-
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -881,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
-static const struct dma_fence_ops amdgpu_job_fence_ops = {
- .get_driver_name = amdgpu_fence_get_driver_name,
- .get_timeline_name = amdgpu_job_fence_get_timeline_name,
- .enable_signaling = amdgpu_job_fence_enable_signaling,
- .release = amdgpu_job_fence_release,
-};
-
/*
* Fence debugfs
*/
@@ -957,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
*val = atomic_read(&adev->reset_domain->reset_res);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 09c9194d5bd5..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -31,6 +31,7 @@
#define FRU_EEPROM_MADDR_6 0x60000
#define FRU_EEPROM_MADDR_8 0x80000
+#define FRU_EEPROM_MADDR_INV 0xFFFFF
static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
@@ -63,10 +64,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
switch (adev->asic_type) {
case CHIP_VEGA20:
/* D161 and D163 are the VG20 server SKUs */
- if (strnstr(atom_ctx->vbios_pn, "D161",
- sizeof(atom_ctx->vbios_pn)) ||
- strnstr(atom_ctx->vbios_pn, "D163",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && (strnstr(atom_ctx->vbios_pn, "D161",
+ sizeof(atom_ctx->vbios_pn)) ||
+ strnstr(atom_ctx->vbios_pn, "D163",
+ sizeof(atom_ctx->vbios_pn)))) {
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -78,8 +79,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
return false;
}
case IP_VERSION(11, 0, 7):
- if (strnstr(atom_ctx->vbios_pn, "D603",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && strnstr(atom_ctx->vbios_pn, "D603",
+ sizeof(atom_ctx->vbios_pn))) {
if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
sizeof(atom_ctx->vbios_pn))) {
return false;
@@ -94,8 +95,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
}
case IP_VERSION(13, 0, 2):
/* All Aldebaran SKUs have an FRU */
- if (!strnstr(atom_ctx->vbios_pn, "D673",
- sizeof(atom_ctx->vbios_pn)))
+ if (atom_ctx && !strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -104,6 +105,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_8;
return true;
+ case IP_VERSION(13, 0, 12):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_INV;
+ return true;
default:
return false;
}
@@ -120,6 +125,10 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
+ /* FRU data available, but no direct EEPROM access */
+ if (fru_addr == FRU_EEPROM_MADDR_INV)
+ return 0;
+
if (!adev->fru_info) {
adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
if (!adev->fru_info)
@@ -135,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -143,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -170,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -188,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b2033f8352f5..d2237ce9da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
- unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
@@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++) {
+ for (i = 0; i < pages; i++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -370,6 +368,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table
+ *
+ * @adev: amdgpu_device pointer
+ * @pa: physical address of the first page to be mapped
+ * @start_page: first page to map in the GART aperture
+ * @num_pages: number of pages to be mapped
+ * @flags: page table entry flags
+ * @dst: CPU address of the GART table
+ *
+ * Binds a BO that is allocated in VRAM to the GART page table
+ * (all ASICs).
+ *
+ * Useful when a kernel BO is located in VRAM but
+ * needs to be accessed from the GART address space.
+ */
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst)
+{
+ u32 i, idx;
+
+ /* The SYSTEM flag indicates the pages aren't in VRAM. */
+ WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM);
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
+ }
+
+ drm_dev_exit(idx);
+}
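+
+/*
+ * Illustrative use (a sketch; 'vram_pa', 'start_page' and 'num_pages' are
+ * assumed to come from the caller's GART window reservation):
+ *
+ *	u64 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
+ *
+ *	amdgpu_gart_map_vram_range(adev, vram_pa, start_page, num_pages,
+ *				   flags, adev->gart.ptr);
+ *	amdgpu_gart_invalidate_tlb(adev);
+ */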
+
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 7cc980bf4725..d3118275ddae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
void *dst);
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 103513b1d23f..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,6 +36,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -44,6 +45,114 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ size_mul(sizeof(uint32_t), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -88,10 +197,8 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (aobj) {
- amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
- }
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -186,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -201,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
+ if (!drm_gem_is_imported(obj) ||
!dma_buf_is_dynamic(obj->import_attach->dmabuf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
@@ -213,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -249,6 +365,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -323,20 +442,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -351,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -421,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -462,15 +572,20 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
- if (r)
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range))
+ return -ENOMEM;
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ amdgpu_hmm_range_free(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -486,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
@@ -640,18 +754,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns resulting fence if freed BO(s) got cleared from the PT.
+ * otherwise stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -667,36 +786,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
-}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
+ return fence;
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -715,8 +806,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -776,6 +869,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -804,12 +903,19 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -821,20 +927,31 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
@@ -846,17 +963,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -867,29 +1001,26 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -902,17 +1033,146 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+ * On input, num_entries is the size of the user-allocated array of
+ * drm_amdgpu_gem_vm_entry stored at args->value.
+ * On output, num_entries is the number of mappings the bo has.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ */
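+ /*
+ * Userspace retry sketch (illustrative; field names taken from the args
+ * struct used above, with handle and op assumed already filled in):
+ *
+ *	op.num_entries = 0;
+ *	ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);	query the count
+ *	entries = calloc(op.num_entries, sizeof(*entries));
+ *	op.value = (uintptr_t)entries;
+ *	ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);	fetch the mappings
+ */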
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries)
+ return -ENOMEM;
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
+
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array.
+ * On output, num_entries is the number of bos in the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
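
A minimal userspace sketch of the retry pattern described in the comment above, not part of this patch: it assumes the DRM_AMDGPU_GEM_LIST_HANDLES command number and the uapi header introduced by this series, plus libdrm's drmCommandWriteRead() helper; only args.num_entries, args.entries and struct drm_amdgpu_gem_list_handles_entry come from the code above.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"	/* assumed location of the new uapi structs */

static int list_bo_handles(int fd, struct drm_amdgpu_gem_list_handles_entry **out,
			   uint32_t *count)
{
	struct drm_amdgpu_gem_list_handles args = { 0 };
	struct drm_amdgpu_gem_list_handles_entry *entries = NULL;
	uint32_t allocated = 0;
	int r;

	for (;;) {
		r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_LIST_HANDLES,
					&args, sizeof(args));
		/* Stop on error, on an empty process, or once the kernel's
		 * reported count fits in the array we handed it. */
		if (r || !args.num_entries ||
		    (entries && args.num_entries <= allocated))
			break;

		/* Grow the array to the count the kernel reported and retry;
		 * the count can change if other threads create bos meanwhile. */
		allocated = args.num_entries;
		free(entries);
		entries = calloc(allocated, sizeof(*entries));
		if (!entries)
			return -ENOMEM;
		args.entries = (uintptr_t)entries;
	}

	if (r) {
		free(entries);
		return r;
	}

	*out = entries;		/* NULL when the process owns no bos */
	*count = args.num_entries;
	return 0;
}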
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6d5d81f0dc4e..8b118c53f351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,14 +76,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
@@ -147,7 +150,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -238,8 +241,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -247,9 +250,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -258,8 +261,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -671,7 +675,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -680,15 +684,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -709,7 +713,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -731,7 +735,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -745,7 +749,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -766,23 +770,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
-/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
- *
- * @adev: amdgpu_device pointer
- * @bool enable true: enable gfx off feature, false: disable gfx off feature
- *
- * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled.
- * 2. other client can send request to disable gfx off feature, the request should be honored.
- * 3. other client can cancel their request of disable gfx off feature
- * 4. other client should not send request to enable gfx off feature before disable gfx off feature.
- */
-
-void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
+ bool no_delay)
{
unsigned long delay = GFX_OFF_DELAY_ENABLE;
@@ -804,7 +798,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
- if (adev->in_s0ix) {
+ if (no_delay) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
@@ -836,6 +830,43 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
+ * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
+ * 3. Other clients can cancel their request to disable the gfx off feature.
+ * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
+ *
+ * The gfx off allow request is delayed by GFX_OFF_DELAY_ENABLE.
+ */
+void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+{
+ /* If going to s2idle, no need to wait */
+ bool no_delay = adev->in_s0ix ? true : false;
+
+ amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
+}
+
+/* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
+ * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
+ * 3. Other clients can cancel their request to disable the gfx off feature.
+ * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
+ *
+ * The gfx off allow request is issued immediately.
+ */
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_do_off_ctrl(adev, enable, true);
+}
+
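
As a concrete illustration of rules 2-4 above, a client that needs the GFX block to stay powered brackets its register access with a disable request and a matching cancel. This is only a sketch of the calling convention, not code from this patch; read_gfx_reg_safely() is a hypothetical caller and RREG32 is the usual amdgpu register-read macro.

static u32 read_gfx_reg_safely(struct amdgpu_device *adev, u32 reg)
{
	u32 val;

	amdgpu_gfx_off_ctrl(adev, false);	/* request: keep GFX out of gfxoff */
	val = RREG32(reg);
	amdgpu_gfx_off_ctrl(adev, true);	/* cancel the disable request */

	return val;
}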
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
int r = 0;
@@ -1000,7 +1031,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -1072,6 +1103,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1141,6 +1175,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -1159,6 +1195,75 @@ failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_hdp_flush(adev);
+
+ if (!ring->funcs->emit_hdp_flush)
+ return -EOPNOTSUPP;
+
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU reset case because that may
+ * block the gpu_recover() routine forever, e.g. when this flush
+ * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will
+ * never return while we keep waiting here, which causes
+ * gpu_recover() to hang.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_hdp_flush;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_hdp_flush;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+ dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+ return r < 0 ? r : -EIO;
+}
+
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
if (amdgpu_num_kcq == -1) {
@@ -1324,6 +1429,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset.*/
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1367,8 +1476,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
@@ -1411,9 +1526,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1424,13 +1541,22 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1517,6 +1643,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1541,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -1559,7 +1687,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
* Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
* feature for each GPU partition. Reading from the 'enforce_isolation'
* sysfs file returns the isolation settings for all partitions, where '0'
- * indicates disabled and '1' indicates enabled.
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
+ * and '3' indicates enabled without the cleaner shader.
*
* Return: The number of bytes read from the sysfs file.
*/
@@ -1594,9 +1723,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
* @count: The size of the input data
*
* This function allows control over the 'enforce_isolation' feature, which
- * serializes access to the graphics engine. Writing '1' or '0' to the
- * 'enforce_isolation' sysfs file enables or disables process isolation for
- * each partition. The input should specify the setting for all partitions.
+ * serializes access to the graphics engine. Writing to the 'enforce_isolation'
+ * sysfs file sets the isolation mode for each partition: '0' disables
+ * isolation, '1' enables isolation with the cleaner shader, '2' enables legacy
+ * isolation without the cleaner shader, and '3' enables process isolation
+ * without submitting the cleaner shader. The input should specify the setting
+ * for all partitions.
*
* Return: The number of bytes written to the sysfs file.
*/
@@ -1633,27 +1765,38 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
- /* Going from enabled to disabled */
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, false);
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
- /* Going from disabled to enabled */
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, true);
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
}
- adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
@@ -1898,39 +2041,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -1975,12 +2120,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
- amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
- }
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = false;
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
@@ -2004,7 +2150,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
bool wait = false;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
@@ -2054,6 +2200,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2070,11 +2217,14 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
/**
@@ -2090,6 +2240,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2103,11 +2254,164 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+}
+
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.idle_work.work);
+ enum PP_SMC_POWER_PROFILE profile;
+ u32 i, fences = 0;
+ int r;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+ } else {
+ schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+ }
+}
+
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ enum PP_SMC_POWER_PROFILE profile;
+ int r;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ atomic_inc(&adev->gfx.total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
+ /* We can safely return early here because we've cancelled the
+ * delayed work, so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->gfx.workload_profile_active)
+ return;
+
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (!adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, true);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = true;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+}
+
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ atomic_dec(&ring->adev->gfx.total_submission_cnt);
+
+ schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+}
+
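
The profile helpers above are meant to be wired into a ring's begin_use/end_use hooks alongside the enforce-isolation hooks; a sketch of how a GFX IP block might chain them (gfx_vN_0_* names are hypothetical and the exact ordering is up to the IP block):

static void gfx_vN_0_ring_begin_use(struct amdgpu_ring *ring)
{
	amdgpu_gfx_profile_ring_begin_use(ring);	/* bump submission count, switch power profile */
	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
}

static void gfx_vN_0_ring_end_use(struct amdgpu_ring *ring)
{
	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
	amdgpu_gfx_profile_ring_end_use(ring);		/* drop count, rearm the idle work */
}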
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: This is an output variable that gets the PACKET3 preamble setup.
+ *
+ * Return:
+ * The updated index into @buffer.
+ */
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse CS data into the CSB buffer
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: This is an output variable that gets the PACKET3 clear-state register data.
+ * @count: Index in @buffer at which to continue writing.
+ *
+ * Return:
+ * The updated index into @buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: This is an output variable that gets the PACKET3 preamble end.
+ * @count: Index in @buffer at which to write the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
}
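
The three helpers above are intended to be chained by a GFX IP's get_csb_buffer routine; a sketch of that usage, assuming the caller has already sized the buffer from the clear-state data (gfx_vN_0_get_csb_buffer is a hypothetical name):

static void gfx_vN_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
	u32 count;

	if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
		return;

	count = amdgpu_gfx_csb_preamble_start(buffer);			/* PREAMBLE_CNTL + CONTEXT_CONTROL */
	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);	/* SET_CONTEXT_REG extents */
	amdgpu_gfx_csb_preamble_end(buffer, count);			/* preamble end + CLEAR_STATE */
}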
/*
@@ -2250,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
&amdgpu_debugfs_compute_sched_mask_fops);
#endif
}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 8b5bd63b5773..efd61a1ccc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -57,6 +57,9 @@ enum amdgpu_gfx_pipe_priority {
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+/* 1 second timeout */
+#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
enum amdgpu_gfx_partition {
AMDGPU_SPX_PARTITION_MODE = 0,
AMDGPU_DPX_PARTITION_MODE = 1,
@@ -167,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * struct amdgpu_rb_config - Configuration for a single Render Backend (RB)
+ *
+ * Bad RBs are fused off, and there is a harvest register the driver reads to
+ * determine which RB(s) are fused off so that it can configure the hardware
+ * state to send nothing to them. There are also user harvest registers that
+ * the driver can program to disable additional RBs, etc., for testing
+ * purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * Two registers are needed to hold the full raster configuration state.
+ * This field holds the first register.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * Two registers are needed to hold the full raster configuration state.
+ * This field holds the second register.
+ */
uint32_t raster_config_1;
};
@@ -218,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -302,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -471,11 +518,19 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
+
+ atomic_t total_submission_cnt;
+ struct delayed_work idle_work;
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -495,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -542,11 +597,10 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
@@ -561,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
@@ -584,6 +639,14 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
+
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 1c19a65e6553..869bceb0fe2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+ /* node_segment_size may not be 4GB aligned on SRIOV; align it up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -269,14 +286,13 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
* @mc: memory controller structure holding memory information
* @gart_placement: GART placement policy with respect to VRAM
*
- * Function will place try to place GART before or after VRAM.
+ * Function will try to place GART before or after VRAM.
* If GART size is bigger than the space left then we adjust the GART size.
* Thus this function will never fail.
*/
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -573,6 +589,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
unsigned i;
unsigned vmhub, inv_eng;
+ struct amdgpu_ring *shared_ring;
/* init the vm inv eng for all vmhubs */
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
@@ -580,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
/* reserve engine 5 for firmware */
if (adev->enable_mes)
vm_inv_engs[i] &= ~(1 << 5);
+ /* reserve engine 6 for uni mes */
+ if (adev->enable_uni_mes)
+ vm_inv_engs[i] &= ~(1 << 6);
/* reserve mmhub engine 3 for firmware */
if (adev->enable_umsch_mm)
vm_inv_engs[i] &= ~(1 << 3);
@@ -591,7 +611,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
if (ring == &adev->mes.ring[0] ||
ring == &adev->mes.ring[1] ||
- ring == &adev->umsch_mm.ring)
+ ring == &adev->umsch_mm.ring ||
+ ring == &adev->cper.ring_buf)
+ continue;
+
+ /* Skip if the ring is a shared ring */
+ if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
continue;
inv_eng = ffs(vm_inv_engs[vmhub]);
@@ -606,6 +631,21 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->vm_hub);
+ /* SDMA has a special packet which allows it to use the same
+ * invalidation engine for all the rings in one instance.
+ * Therefore, we do not allocate a separate VM invalidation engine
+ * for SDMA page rings. Instead, they share the VM invalidation
+ * engine with the SDMA gfx ring. This change ensures efficient
+ * resource management and avoids the issue of insufficient VM
+ * invalidation engines.
+ */
+ shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
+ if (shared_ring) {
+ shared_ring->vm_inv_eng = ring->vm_inv_eng;
+ dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
+ ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
+ continue;
+ }
}
return 0;
@@ -653,7 +693,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
@@ -678,12 +718,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -740,10 +778,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
@@ -851,6 +900,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
@@ -888,6 +938,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0) ||
gc_ver >= IP_VERSION(10, 3, 0));
if (!amdgpu_sriov_xnack_support(adev))
@@ -1009,9 +1060,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1024,6 +1073,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1198,6 +1252,10 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if ((mode >= ARRAY_SIZE(nps_desc)) ||
(BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
@@ -1393,3 +1451,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
+
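
For the software fallback path above, a worked example (numbers are illustrative only, not from this patch):

/*
 *   real_vram_size = 64 GiB reported in NPS4  =>  num_mem_partitions = 4
 *   size = ((64 GiB + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT) / 4 pages per range
 *   ranges 0..3 get fpfn/lpfn laid out in steps of 'size'
 *   range 3's lpfn is then clamped to (64 GiB >> AMDGPU_GPU_PAGE_SHIFT) - 1,
 *   so the ranges together cover exactly real_vram_size.
 */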
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 459a30fe239f..727342689d4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "amdgpu_irq.h"
+#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
/* VA hole for 48bit addresses on Vega10 */
@@ -61,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -80,6 +84,13 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+#define AMDGPU_MAX_MEM_RANGES 8
+
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+
/*
* GMC page fault information
*/
@@ -148,15 +159,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -174,28 +185,6 @@ struct amdgpu_gmc_funcs {
bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
-struct amdgpu_xgmi_ras {
- struct amdgpu_ras_block_object ras_block;
-};
-
-struct amdgpu_xgmi {
- /* from psp */
- u64 node_id;
- u64 hive_id;
- /* fixed per family */
- u64 node_segment_size;
- /* physical node (0-3) */
- unsigned physical_node_id;
- /* number of nodes (0-4) */
- unsigned num_physical_nodes;
- /* gpu list in the same hive */
- struct list_head head;
- bool supported;
- struct ras_common_if *ras_if;
- bool connected_to_cpu;
- struct amdgpu_xgmi_ras *ras;
-};
-
struct amdgpu_mem_partition_info {
union {
struct {
@@ -322,6 +311,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
@@ -371,9 +361,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
@@ -411,6 +402,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -472,5 +464,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
int nps_mode);
void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
-
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0760e70402ec..895c1e4c6747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ start += amdgpu_vce_required_gart_pages(adev);
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..5a60d69a3e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,38 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ }
+}
+
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp)
+ adev->asic_funcs->invalidate_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp)
+ adev->hdp.funcs->invalidate_hdp(adev, ring);
+}
+
+void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->flush_hdp)
+ adev->asic_funcs->flush_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
+ adev->hdp.funcs->flush_hdp(adev, ring);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..d9f488fa76b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,10 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..90d26d820bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,19 +167,14 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range)
+ void *owner,
+ struct amdgpu_hmm_range *range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
-
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
@@ -222,36 +217,77 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
+ hmm_range->hmm_pfns = NULL;
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+/**
+ * amdgpu_hmm_range_valid - check if an HMM range is still valid
+ * @range: pointer to the &struct amdgpu_hmm_range to validate
+ *
+ * Determines whether the given HMM range @range is still valid by
+ * checking for invalidations via the MMU notifier sequence. This is
+ * typically used to verify that the range has not been invalidated
+ * by concurrent address space updates before it is accessed.
+ *
+ * Return:
+ * * true if @range is valid and can be used safely
+ * * false if @range is NULL or has been invalidated
+ */
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- bool r;
+ if (!range)
+ return false;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
+}
- return r;
+/**
+ * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
+ * @bo: optional buffer object to associate with this HMM range
+ *
+ * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed.
+ * The reference count of the @bo is incremented.
+ *
+ * Return:
+ * Pointer to a newly allocated struct amdgpu_hmm_range on success,
+ * or NULL if memory allocation fails.
+ */
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
+}
+
+/**
+ * amdgpu_hmm_range_free - release an AMDGPU HMM range
+ * @range: pointer to the range object to free
+ *
+ * Releases all resources held by @range, including the associated
+ * hmm_pfns, and drops the reference on the associated bo, if any.
+ *
+ * Return: void
+ */
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
+{
+ if (!range)
+ return;
+
+ kvfree(range->hmm_range.hmm_pfns);
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..140bc9cd57b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,13 +31,20 @@
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+ void *owner,
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
@@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
+{
+ return false;
+}
+
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
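A minimal usage sketch of the reworked HMM API, assuming a caller that has already registered an mmu_interval_notifier for the BO and knows the user address range; variable names here are illustrative, not taken from the patch:

	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(bo);	/* takes a reference on bo */
	if (!range)
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
				       readonly, owner, range);
	if (r) {
		amdgpu_hmm_range_free(range);	/* frees pfns, drops bo ref */
		return r;
	}

	/* ... consume range->hmm_range.hmm_pfns ... */

	if (!amdgpu_hmm_range_valid(range))
		;	/* invalidated concurrently, caller should retry */

	amdgpu_hmm_range_free(range);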
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index f0765ccde668..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
@@ -185,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -216,13 +215,23 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
+void amdgpu_i2c_init(struct amdgpu_device *adev)
{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
+ if (!adev->is_atom_fw) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_atombios_i2c_init(adev);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ amdgpu_atombios_oem_i2c_init(adev, 0x97);
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
/* remove all the buses */
@@ -230,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 21e3d1dad0a1..1d3d3806e0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -28,6 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
+void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index d751995dc131..586a58facca1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -149,11 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job) {
vm = job->vm;
fence_ctx = job->base.s_fence ?
- job->base.s_fence->scheduled.context : 0;
+ job->base.s_fence->finished.context : 0;
shadow_va = job->shadow_va;
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = fence_ctx;
+ /* the vm fence is also part of the job's context */
+ job->hw_vm_fence->context = fence_ctx;
} else {
vm = NULL;
fence_ctx = 0;
@@ -161,22 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = kzalloc(sizeof(*af), GFP_ATOMIC);
+ if (!af)
+ return -ENOMEM;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
(!ring->funcs->secure_submission_supported)) {
dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
alloc_size = ring->funcs->emit_frame_size + num_ibs *
@@ -185,14 +199,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
- return r;
+ goto free_fence;
}
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
@@ -282,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -290,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_undo(ring);
return r;
}
+ *f = &af->base;
+ /* get a ref for the job */
+ if (job)
+ dma_fence_get(*f);
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
@@ -297,16 +315,30 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
if (ring->funcs->emit_wave_limit &&
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+ /* Save the wptr associated with this fence.
+ * This must be last for resets to work properly
+ * as we need to save the wptr associated with this
+ * fence so we know what rings contents to backup
+ * after we reset the queue.
+ */
+ amdgpu_fence_save_wptr(af);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
+
+free_fence:
+ if (!job)
+ kfree(af);
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 8e712a11aba5..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence **fences;
- unsigned i;
+ /* If anybody is waiting for a VMID, let everybody wait for fairness */
if (!dma_fence_is_signaled(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
- if (!fences)
- return -ENOMEM;
-
/* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
/* Don't use per engine and per process VMID at the same time */
struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
NULL : ring;
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
- if (!fences[i])
- break;
- ++i;
+ *fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ if (!(*fence))
+ return 0;
}
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&(*idle)->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- *idle = NULL;
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- return -ENOMEM;
- }
-
- *fence = dma_fence_get(&array->base);
- dma_fence_put(ring->vmid_wait);
- ring->vmid_wait = &array->base;
- return 0;
- }
- kfree(fences);
+ /*
+ * If we can't find an idle VMID to use, wait on a fence from the least
+ * recently used in the hope that it will be available soon.
+ */
+ *idle = NULL;
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = dma_fence_get(*fence);
+ /* This is the reference we return */
+ dma_fence_get(*fence);
return 0;
}
@@ -275,58 +251,45 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush))) {
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->owner != vm->immediate.fence_context ||
+ (!adev->vm_manager.concurrent_flush && needs_flush)) {
struct dma_fence *tmp;
- /* Wait for the gang to be assembled before using a
- * reserved VMID or otherwise the gang could deadlock.
+ /* Don't use per engine and per process VMID at the
+ * same time
*/
- tmp = amdgpu_device_get_gang(adev);
- if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+ /* to prevent one context starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
*id = NULL;
- *fence = tmp;
+ *fence = dma_fence_get(tmp);
return 0;
}
- dma_fence_put(tmp);
-
- /* Make sure the id is owned by the gang before proceeding */
- if (!job->gang_submit ||
- (*id)->owner != vm->immediate.fence_context) {
-
- /* Don't use per engine and per process VMID at the
- * same time
- */
- if (adev->vm_manager.concurrent_flush)
- ring = NULL;
-
- /* to prevent one context starved by another context */
- (*id)->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
- if (tmp) {
- *id = NULL;
- *fence = dma_fence_get(tmp);
- return 0;
- }
- }
- needs_flush = true;
}
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+ GFP_ATOMIC);
if (r)
return r;
@@ -385,7 +348,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_ATOMIC);
if (r)
return r;
@@ -422,7 +386,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -437,7 +401,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_ATOMIC);
if (r)
goto error;
@@ -474,55 +439,71 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
- * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
- return vm->reserved_vmid[vmhub] ||
- (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
- vm->root.bo->xcp_id : 0] &&
- AMDGPU_IS_GFXHUB(vmhub));
+ return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
@@ -589,10 +570,17 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -601,11 +589,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 4012fb2dd08a..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -78,12 +77,11 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
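A rough sketch of the reworked reserved-VMID interface, which now tracks the reservation per VM instead of per ID manager; the hub index and error handling are illustrative only:

	/* reserve a GFX hub VMID for this VM, e.g. for SPM or debugging */
	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
	if (r)
		return r;	/* -ENOENT: another VM already holds the reservation */

	/* the check is now purely per-VM state */
	WARN_ON(!amdgpu_vmid_uses_reserved(vm, AMDGPU_GFXHUB(0)));

	/* ... submissions from this VM grab the reserved ID ... */

	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));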
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 901f8b12c672..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -217,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
- goto restart_ih;
+ if (!ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow)
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7d4395a5d8ac..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
@@ -72,12 +72,16 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
#define amdgpu_ih_ts_after(t1, t2) \
(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN as JPEG is only alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+ /* For rest of the IPs, no look up required.
+ * Assume 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
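To illustrate the mapping: if harvesting leaves only physical VCN instances 1 and 3 (inst_mask = 0xa), amdgpu_populate_ip_map() records dev_inst[VCN_HWIP] = {1, 3, -1, ...}, so a logical mask of 0x3 translates to a device mask of 0xa. A hedged stand-alone illustration of the same ffs() walk:

	/* illustrative only; mirrors amdgpu_logical_to_dev_mask() for a
	 * layout where logical instances 0/1 map to physical 1/3
	 */
	int dev_inst[] = { 1, 3, -1, -1 };
	unsigned int mask = 0x3, dev_mask = 0;

	while (mask) {
		int log_inst = ffs(mask) - 1;

		dev_mask |= 1u << dev_inst[log_inst];
		mask &= ~(1u << log_inst);
	}
	/* dev_mask == 0xa */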
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
index bea255529993..2490fd322aec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-static const struct nvkm_engine_func
-ga102_ofa = {
- .sclass = {
- { -1, -1, NVC7FA_VIDEO_OFA },
- {}
- }
-};
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
-int
-ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
- return -ENODEV;
-}
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -619,6 +623,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+ /* When the threshold is reached, the interrupt source may not be enabled; return -EINVAL */
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,10 +733,10 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 732744488b03..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
/**
* isp_hw_init - start and test isp block
*
@@ -124,7 +126,7 @@ static int isp_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool isp_is_idle(void *handle)
+static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -141,6 +143,181 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a GPU address for the BO so that it is
+ * accessible through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external to
+ * the drm device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs the BO used for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * external to the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index b03664c66dd6..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -50,9 +47,11 @@ struct amdgpu_isp {
struct mfd_cell *isp_cell;
struct resource *isp_res;
struct resource *isp_i2c_res;
+ struct resource *isp_gpio_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
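For the V4L2 isp driver side, the exported helpers take the isp platform device and hand back pinned, GART-mapped BOs. A hedged sketch of the expected call sequence; ispdev, fw_size and dmabuf are placeholders for the caller's own state:

	void *bo = NULL, *cpu = NULL;
	u64 gpu_addr = 0;
	int r;

	/* kernel buffer visible to the ISP HW through the GART aperture */
	r = isp_kernel_buffer_alloc(&ispdev->dev, fw_size, &bo, &gpu_addr, &cpu);
	if (r)
		return r;
	/* ... program gpu_addr into the ISP, fill cpu ... */
	isp_kernel_buffer_free(&bo, &gpu_addr, &cpu);

	/* user buffer backed by a dma-buf handed in from V4L2 */
	r = isp_user_buffer_alloc(&ispdev->dev, dmabuf, &bo, &gpu_addr);
	if (r)
		return r;
	/* ... */
	isp_user_buffer_free(bo);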
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 100f04475943..0a0dcbf0798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -124,36 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
/* attempt a per ring reset */
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
ring->funcs->reset) {
- dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
- r = amdgpu_ring_reset(ring, job->vmid);
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched, 0);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
- dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -179,14 +174,19 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
+ struct amdgpu_fence *af;
+ int r;
+
if (num_ibs == 0)
return -EINVAL;
@@ -194,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -203,17 +217,27 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+ *job = NULL;
+
+ return r;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
@@ -222,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
if (r) {
if (entity)
drm_sched_job_cleanup(&(*job)->base);
+ kfree((*job)->hw_vm_fence);
+ kfree((*job)->hw_fence);
kfree(*job);
+ *job = NULL;
}
return r;
@@ -250,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* Check if any fences where initialized */
+ /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
else
f = NULL;
@@ -270,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
- /* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -303,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
@@ -342,32 +380,33 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int r;
r = drm_sched_entity_error(s_entity);
if (r)
goto error;
- if (job->gang_submit)
+ if (job->gang_submit) {
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
- if (!fence && job->vm && !job->vmid) {
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
+
+ if (job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
+ return fence;
}
- return fence;
+ return NULL;
error:
dma_fence_set_error(&job->base.s_fence->finished, r);
@@ -411,8 +450,24 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+/*
+ * This is a duplicate function from DRM scheduler sched_internal.h.
+ * Plan is to remove it when amdgpu_job_stop_all_jobs_on_sched is removed, due
+ * latter being incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
@@ -425,7 +480,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
- while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_signal(&s_fence->scheduled);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..7abf069d17d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,11 +44,28 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job ids (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence *hw_fence;
+ struct amdgpu_fence *hw_vm_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -78,6 +95,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
@@ -90,11 +108,13 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
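Kernel-internal submissions now pass one of the AMDGPU_KERNEL_JOB_ID_* values (counting down from U64_MAX) where user submissions pass the drm_client_id. A hedged sketch of an in-kernel allocation with the new parameter; the entity and owner shown are placeholders, not mandated by the patch:

	struct amdgpu_job *job;
	struct dma_fence *fence;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
				     16 * 4, AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
	if (r)
		return r;

	/* ... fill job->ibs[0] ... */

	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);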
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index b6d2eb049f54..63ee6ba6a931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -33,6 +33,7 @@
#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
@@ -85,6 +86,9 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
+ if (adev->jpeg.reg_list)
+ amdgpu_jpeg_reg_dump_fini(adev);
+
mutex_destroy(&adev->jpeg.jpeg_pg_lock);
return 0;
@@ -117,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -190,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -364,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))
ring->sched.ready = true;
else
ring->sched.ready = false;
@@ -452,3 +459,149 @@ void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
}
}
+
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->jpeg.ip_dump) {
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
+ return -ENOMEM;
+ }
+ adev->jpeg.reg_list = reg;
+ adev->jpeg.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->jpeg.ip_dump);
+ adev->jpeg.reg_list = NULL;
+ adev->jpeg.reg_count = 0;
+}
+
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, inst_id, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(JPEG, i);
+ inst_off = i * adev->jpeg.reg_count;
+ /* check power status from UVD_JPEG_POWER_STATUS */
+ adev->jpeg.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[0],
+ inst_id));
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered)
+ for (j = 1; j < adev->jpeg.reg_count; j++)
+ adev->jpeg.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[j],
+ inst_id));
+ }
+}
+
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->jpeg.num_jpeg_inst);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:JPEG%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->jpeg.reg_count;
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered) {
+ drm_printf(p, "Active Instance:JPEG%d\n", i);
+ for (j = 0; j < adev->jpeg.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->jpeg.reg_list[j].reg_name,
+ adev->jpeg.ip_dump[inst_off + j]);
+ } else
+ drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
+ }
+}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index d9cb343a8708..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -92,6 +99,14 @@
*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
} while (0)
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_jpeg_caps {
+ AMDGPU_JPEG_RRMT_ENABLED,
+};
+
+#define AMDGPU_JPEG_CAPS(caps) BIT(AMDGPU_JPEG_##caps)
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -130,6 +145,10 @@ struct amdgpu_jpeg {
uint8_t num_inst_per_aid;
bool indirect_sram;
uint32_t supported_reset;
+ uint32_t caps;
+ u32 *ip_dump;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -154,5 +173,12 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
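Individual JPEG versions can now register their dump list with the shared helpers instead of open-coding ip_dump handling. A hedged sketch of how a jpeg_vX_Y sw_init might wire this up; the register entries are placeholders, and the power-status register is expected to stay first per amdgpu_jpeg_dump_ip_state():

	static const struct amdgpu_hwip_reg_entry jpeg_reg_list[] = {
		/* placeholder entries, power status first */
		SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		/* ... remaining registers to capture ... */
	};

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list,
				      ARRAY_SIZE(jpeg_reg_list));
	if (r)
		return r;

	/* .dump_ip_state / .print_ip_state in the IP funcs then point at
	 * amdgpu_jpeg_dump_ip_state() and amdgpu_jpeg_print_ip_state()
	 */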
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 016a6f6c4267..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -90,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -160,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -378,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -387,24 +409,45 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -414,7 +457,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +467,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +480,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +493,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -459,8 +506,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +523,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +532,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -539,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
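/*
 * userq_num_slots above is simply a population count over the per-pipe HQD
 * masks that MES is allowed to schedule. A standalone sketch of that counting,
 * with illustrative mask values not taken from this patch:
 */
#include <stdio.h>

static unsigned int count_slots(const unsigned int *hqd_mask, int num_pipes)
{
        unsigned int slots = 0;

        for (int i = 0; i < num_pipes; i++)
                slots += __builtin_popcount(hqd_mask[i]); /* hweight32 equivalent */
        return slots;
}

int main(void)
{
        unsigned int gfx_hqd_mask[1] = { 0xFE };                    /* queues 1-7 usable */
        unsigned int compute_hqd_mask[4] = { 0xC, 0xC, 0xC, 0xC };  /* queues 2-3 per pipe */

        printf("gfx slots: %u\n", count_slots(gfx_hqd_mask, 1));          /* prints 7 */
        printf("compute slots: %u\n", count_slots(compute_hqd_mask, 4));  /* prints 8 */
        return 0;
}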
@@ -707,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
+ ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -753,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
- mem.vram.heap_usage =
- ttm_resource_manager_usage(vram_man);
+ mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(vram_man) : 0;
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -846,7 +898,7 @@ out:
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
- uint32_t pcie_gen_mask;
+ uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
@@ -888,6 +940,19 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
+ if (amdgpu_passthrough(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+ else if (amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_TOP;
@@ -934,15 +999,18 @@ out:
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
- pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
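/*
 * In pcie_gen_mask / pcie_mlw_mask the low half describes what the ASIC
 * supports and the high half what the platform supports, so ANDing the word
 * with itself shifted down by the support shift (as done above) keeps only the
 * speeds and widths both sides can do. A standalone sketch with made-up values:
 */
#include <stdio.h>

static int fls_u32(unsigned int x)      /* highest set bit, 1-based; 0 if none */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        /* bit n set => gen n+1 supported; ASIC: gen1-4 (low half), platform: gen1-3 (high half) */
        unsigned int pcie_gen_mask = (0x7u << 16) | 0xFu;
        unsigned int common = pcie_gen_mask & (pcie_gen_mask >> 16);

        printf("highest common PCIe gen: %d\n", fls_u32(common)); /* prints 3 */
        return 0;
}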
@@ -966,6 +1034,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1281,6 +1351,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1334,13 +1420,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
- if (r)
- goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
- goto error_vm;
+ goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
@@ -1364,6 +1448,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
@@ -1373,15 +1465,12 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
out_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
@@ -1449,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 32b27a1658e7..9c182ce501af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -83,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -141,28 +105,55 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
- adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_mmhub = 0xFF00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xFFFE : 0xFF00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX V12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is reserved for the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is reserved for the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- /* use only 1st MEC pipes */
- if (i >= adev->gfx.mec.num_pipe_per_mec)
- continue;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ if (i >= num_pipes)
+ break;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
- IP_VERSION(6, 0, 0))
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
- /* zero sdma_hqd_mask for non-existent engine */
- else if (adev->sdma.num_instances == 1)
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
- else
- adev->mes.sdma_hqd_mask[i] = 0xfc;
+ if (i >= num_pipes)
+ break;
+ adev->mes.sdma_hqd_mask[i] = 0xfc;
}
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
@@ -200,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -225,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -246,244 +255,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -503,7 +274,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -527,306 +298,8 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
-
- return r;
-}
-
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
+ dev_err(adev->dev, "failed to resume all gangs");
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
-
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
return r;
}
@@ -845,9 +318,11 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -868,9 +343,11 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
queue_input.trail_fence_addr = gpu_addr;
queue_input.trail_fence_data = seq;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -880,7 +357,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -894,10 +371,67 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+
+ /*
+ * TODO: for MES-scheduled user compute queue resets, also return the HQD
+ * info stored from hung_queue_hqd_info_offset up to the full array size.
+ */
+ }
return r;
}
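/*
 * A minimal sketch, under an assumed calling context, of how a reset handler
 * might use the helper above to report hung compute queues without resetting
 * them; the wrapper function itself is hypothetical and not part of this patch.
 */
static void example_report_hung_compute_queues(struct amdgpu_device *adev)
{
        int size = amdgpu_mes_get_hung_queue_db_array_size(adev);
        unsigned int hung_num = 0;
        u32 *hung_db;

        if (size <= 0)
                return;

        hung_db = kcalloc(size, sizeof(u32), GFP_KERNEL);
        if (!hung_db)
                return;

        /* detect_only = true: report offenders, leave the queues untouched */
        if (!amdgpu_mes_detect_and_reset_hung_queues(adev, AMDGPU_RING_TYPE_COMPUTE,
                                                     true, &hung_num, hung_db))
                dev_info(adev->dev, "%u hung queue doorbell(s) found\n", hung_num);

        kfree(hung_db);
}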
@@ -911,7 +445,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
uint32_t *read_val_ptr;
if (amdgpu_device_wb_get(adev, &addr_offset)) {
- DRM_ERROR("critical bug! too many mes readers\n");
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
goto error;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
@@ -921,13 +455,15 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
val = *(read_val_ptr);
@@ -948,14 +484,16 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -975,42 +513,31 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
}
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask)
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
{
- struct mes_misc_op_input op_input;
- int r;
-
- op_input.op = MES_MISC_OP_WRM_REG_WAIT;
- op_input.wrm_reg.reg0 = reg;
- op_input.wrm_reg.ref = val;
- op_input.wrm_reg.mask = mask;
-
- if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
- r = -EINVAL;
- goto error;
- }
+ uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
- r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
- if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+ hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+ ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
-error:
- return r;
+ return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+ ref_and_mask, ref_and_mask);
}
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
@@ -1024,7 +551,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1048,7 +576,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -1062,7 +590,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -1074,515 +603,19 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
return r;
}
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
-#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
-do { \
- if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].slots[id_offs]); \
- else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ring); \
- else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ib); \
- else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].padding); \
-} while(0)
-
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
-{
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
- break;
- case AMDGPU_RING_TYPE_SDMA:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
- break;
- default:
- break;
- }
-
- WARN_ON(1);
- return -EINVAL;
-}
-
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
-
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
-
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
- amdgpu_mes_unlock(&adev->mes);
-
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
- return r;
-}
-
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- del_timer_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1670,18 +703,16 @@ out:
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
- bool is_supported = false;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
- mes_rev >= 0x63)
- is_supported = true;
-
- return is_supported;
+ return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1695,7 +726,9 @@ int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_i
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to change_config.\n");
@@ -1703,6 +736,23 @@ error:
return r;
}
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
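/*
 * The helper above OR-accumulates the per-partition results, so the caller
 * sees a nonzero value if any partition failed, at the cost of no longer
 * getting one specific errno back. A standalone sketch of that pattern, with
 * illustrative values:
 */
#include <stdio.h>

int main(void)
{
        int per_partition[3] = { 0, -5 /* e.g. -EIO */, 0 };
        int r = 0;

        for (int i = 0; i < 3; i++)
                r |= per_partition[i];

        printf("combined: %d -> %s\n", r, r ? "at least one failure" : "all ok");
        return 0;
}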
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c6f93cbd6739..e989225b354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -40,7 +40,8 @@
#define AMDGPU_MES_VERSION_MASK 0x00000fff
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
-#define AMDGPU_MES_MSCRATCH_SIZE 0x8000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -56,7 +57,7 @@ enum amdgpu_mes_priority_level {
struct amdgpu_mes_funcs;
-enum admgpu_mes_pipe {
+enum amdgpu_mes_pipe {
AMDGPU_MES_SCHED_PIPE = 0,
AMDGPU_MES_KIQ_PIPE,
AMDGPU_MAX_MES_PIPES = 2,
@@ -111,8 +112,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -143,23 +144,15 @@ struct amdgpu_mes {
const struct amdgpu_mes_funcs *funcs;
/* mes resource_1 bo*/
- struct amdgpu_bo *resource_1;
- uint64_t resource_1_gpu_addr;
- void *resource_1_addr;
-
-};
-
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
+ struct amdgpu_bo *resource_1[AMDGPU_MAX_MES_PIPES];
+ uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+
+ int hung_queue_db_array_size;
+ int hung_queue_hqd_info_offset;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -246,18 +239,7 @@ struct mes_add_queue_input {
struct mes_remove_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
-};
-
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
+ bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
@@ -291,7 +273,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -301,6 +283,20 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
+};
+
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
};
enum mes_misc_opcode {
@@ -388,42 +384,27 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -435,14 +416,20 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
@@ -451,27 +438,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
@@ -534,6 +504,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 5e3faefc5510..dc8d2f52c7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -326,6 +326,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
const struct drm_edid *bios_hardcoded_edid;
@@ -496,8 +498,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
@@ -609,6 +609,7 @@ struct amdgpu_i2c_adapter {
struct i2c_adapter base;
struct ddc_service *ddc_service;
+ bool oem;
};
#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 79c2f807b9fe..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 96f4b8904e9a..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -152,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -163,8 +172,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
- !(adev->flags & AMD_IS_APU))
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -351,7 +360,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
-EXPORT_SYMBOL(amdgpu_bo_create_kernel);
/**
* amdgpu_bo_create_isp_user - create user BO for isp
@@ -420,7 +428,6 @@ error_unreserve:
return r;
}
-EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -524,7 +531,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
-EXPORT_SYMBOL(amdgpu_bo_free_kernel);
/**
* amdgpu_bo_free_isp_user - free BO for isp use
@@ -547,7 +553,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
}
amdgpu_bo_unref(&bo);
}
-EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -939,7 +944,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -967,7 +972,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -1018,7 +1023,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -1044,7 +1049,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1262,7 +1268,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1295,28 +1301,37 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
- /* We only remove the fence if the resv has individualized. */
- WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
- && bo->base.resv != &bo->base._resv);
- if (bo->base.resv == &bo->base._resv)
- amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+ /*
+ * We lock the private dma_resv object here; since the BO is about to
+ * be released, nobody else should still hold a pointer to it.
+ * So if this locking fails, something is wrong with the reference
+ * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
- return;
+ goto out;
- if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
- return;
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
- r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
- if (!WARN_ON(r)) {
- amdgpu_vram_mgr_set_cleared(bo->resource);
- amdgpu_bo_fence(abo, fence, false);
- dma_fence_put(fence);
- }
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
- dma_resv_unlock(bo->base.resv);
+out:
+ dma_resv_unlock(&bo->base._resv);
}
/**
@@ -1464,6 +1479,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
+
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
@@ -1519,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@@ -1602,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
@@ -1636,7 +1676,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+ /* Add the gem obj resv fence dump */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 375448627f7b..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
* if non-zero, cannot unmap from GPU because user queues may still access it
*/
unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
};
struct amdgpu_bo {
@@ -167,6 +168,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
@@ -304,6 +307,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3c1b08441a6f..0b10497d487c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -44,7 +44,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
@@ -153,6 +153,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
ret = psp_init_cap_microcode(psp, ucode_prefix);
break;
+ case IP_VERSION(13, 0, 12):
+ ret = psp_init_ta_microcode(psp, ucode_prefix);
+ break;
default:
return -EINVAL;
}
@@ -193,6 +196,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
@@ -208,11 +212,15 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
- case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
+ case IP_VERSION(13, 0, 12):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
@@ -244,8 +252,13 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp_v14_0_set_psp_funcs(psp);
break;
+ case IP_VERSION(14, 0, 5):
+ psp_v14_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
+ break;
default:
return -EINVAL;
}
@@ -435,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
@@ -533,7 +546,6 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
@@ -543,8 +555,8 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
- kfree(cmd);
- cmd = NULL;
+ kfree(psp->cmd);
+ psp->cmd = NULL;
psp_free_shared_bufs(psp);
@@ -563,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -585,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+ "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
@@ -643,6 +662,14 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "BOOT_CFG";
case GFX_CMD_ID_CONFIG_SQ_PERFMON:
return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -854,12 +881,12 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -973,6 +1000,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+ dev_warn(adev->dev, "reserve fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1259,6 +1386,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1407,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
@@ -1426,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
continue;
mirror_top_info->nodes[j].num_hops = dst_num_hops;
- /*
- * prevent 0 num_links value re-reflection since reflection
+ mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
+ /* prevent 0 num_links value re-reflection since reflection
* criteria is based on num_hops (direct or indirect).
- *
*/
- if (dst_num_links)
+ if (dst_num_links) {
mirror_top_info->nodes[j].num_links = dst_num_links;
+ /* swap src and dst due to frame of reference */
+ for (int k = 0; k < dst_num_links; k++) {
+ mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
+ node_info.port_num[k].dst_xgmi_port_num;
+ mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
+ node_info.port_num[k].src_xgmi_port_num;
+ }
+ }
break;
}
@@ -1507,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
- IP_VERSION(13, 0, 14);
- bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
- psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ IP_VERSION(13, 0, 14) ||
+ amdgpu_sriov_vf(psp->adev);
+ bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
+ amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
@@ -1786,34 +1927,47 @@ int psp_ras_initialize(struct psp_context *psp)
if (ret)
dev_warn(adev->dev, "PSP get boot config failed\n");
- if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
- if (!boot_cfg) {
- dev_info(adev->dev, "GECC is disabled\n");
- } else {
- /* disable GECC in next boot cycle if ras is
- * disabled by module parameter amdgpu_ras_enable
- * and/or amdgpu_ras_mask, or boot_config_get call
- * is failed
- */
- ret = psp_boot_config_set(adev, 0);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
- }
+ if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
+ dev_warn(adev->dev,
+ "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
} else {
- if (boot_cfg == 1) {
- dev_info(adev->dev, "GECC is enabled\n");
+ if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ if (boot_cfg == 1) {
+ dev_info(adev->dev, "GECC is enabled\n");
+ } else {
+ /* enable GECC in next boot cycle if it is disabled
+ * in boot config, or force enable GECC if failed to
+ * get boot configuration
+ */
+ ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ }
} else {
- /* enable GECC in next boot cycle if it is disabled
- * in boot config, or force enable GECC if failed to
- * get boot configuration
- */
- ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ if (!boot_cfg) {
+ if (!adev->ras_default_ecc_enabled &&
+ amdgpu_ras_enable != 1 &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
+ else
+ dev_info(adev->dev, "GECC is disabled\n");
+ } else {
+ /* disable GECC in next boot cycle if ras is
+ * disabled by module parameter amdgpu_ras_enable
+ * and/or amdgpu_ras_mask, or boot_config_get call
+ * is failed
+ */
+ ret = psp_boot_config_set(adev, 0);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ }
}
}
}
@@ -1840,6 +1994,7 @@ int psp_ras_initialize(struct psp_context *psp)
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
ras_cmd->ras_in_message.init_flags.nps_mode =
adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
ret = psp_ta_load(psp, &psp->ras_context.context);
@@ -2189,7 +2344,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -2205,11 +2361,14 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
- } else
+ } else {
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
+ }
mutex_lock(&psp->securedisplay_context.mutex);
@@ -2311,11 +2470,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
return false;
}
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2414,6 +2589,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
@@ -3020,10 +3203,7 @@ static int psp_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
@@ -3148,6 +3328,10 @@ static int psp_resume(struct amdgpu_ip_block *ip_block)
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -3495,8 +3679,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3772,8 +3960,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3814,9 +4006,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
- goto out;
+ } else {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
}
- dev_err(adev->dev, "fail to initialize cap microcode\n");
+ goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3891,10 +4084,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
uint32_t fw_ver;
int ret;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3923,8 +4118,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3994,7 +4191,7 @@ int is_psp_fw_valid(struct psp_bin_desc bin)
}
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4030,7 +4227,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
}
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4082,7 +4279,7 @@ rel_buf:
* Writing to this file will stage an IFWI for update. Reading from this file
* will trigger the update process.
*/
-static struct bin_attribute psp_vbflash_bin_attr = {
+static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
.write = amdgpu_psp_vbflash_write,
@@ -4113,7 +4310,7 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
-static struct bin_attribute *bin_flash_attrs[] = {
+static const struct bin_attribute *const bin_flash_attrs[] = {
&psp_vbflash_bin_attr,
NULL
};
@@ -4154,6 +4351,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+ /* serialize the open() file calling */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+ * Make sure only one userspace process is alive for dumping so that
+ * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed.
+ * Consider the case where one process tries to open the file while
+ * another one has proceeded to read or release. In this way, the use of
+ * a mutex for the read() or release() callbacks is eliminated as well.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+ dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8d5acc415d38..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,29 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Check against
+ * both status bits and response state - helps to detect a command failure
+ * or other unexpected cases like a device drop reading all 0xFFs
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -107,9 +130,13 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -137,11 +164,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -319,6 +349,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -406,6 +444,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -464,6 +505,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -475,6 +520,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -486,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -553,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
@@ -569,5 +618,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 123bcf5c2bb1..bacf888735db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
}
amdgpu_gfx_off_ctrl(adev, true);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 01c947066a2e..2a6cf7963dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -41,6 +41,7 @@
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras_mgr.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -77,6 +78,7 @@ const char *ras_block_string[] = {
"jpeg",
"ih",
"mpio",
+ "mmsch",
};
const char *ras_mca_block_string[] = {
@@ -121,12 +123,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -135,12 +140,18 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+static void
+amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
struct amdgpu_device *devs[MAX_GPU_INSTANCE];
int num_gpu;
@@ -168,18 +179,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -206,6 +215,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* The input address that needs to be checked is allocated by the
+ * current calling process, so the calling process itself has to be
+ * excluded from the check.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -296,6 +355,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -310,6 +371,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -499,6 +569,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -512,22 +585,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
@@ -547,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
+
/**
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
*
@@ -571,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
(struct amdgpu_device *)file_inode(f)->i_private;
int ret;
+ if (amdgpu_uniras_enabled(adev)) {
+ ret = amdgpu_uniras_clear_badpages_info(adev);
+ return ret ? ret : size;
+ }
+
ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
@@ -1106,6 +1180,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -1475,9 +1552,51 @@ out_fini_err_data:
return ret;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
+{
+ struct ras_cmd_dev_handle req = {0};
+ int ret;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ &req, sizeof(req), NULL, 0);
+ if (ret) {
+ dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_cmd_block_ecc_info_req req = {0};
+ struct ras_cmd_block_ecc_info_rsp rsp = {0};
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ req.block_id = info->head.block;
+ req.subblock_id = info->head.sub_block_index;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ if (!ret) {
+ info->ce_count = rsp.ce_count;
+ info->ue_count = rsp.ue_count;
+ info->de_count = rsp.de_count;
+ }
+
+ return ret;
+}
+
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
- return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_query_block_ecc(adev, info);
+ else
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
@@ -1497,6 +1616,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -1526,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_cmd_inject_error_req inject_req;
+ struct ras_cmd_inject_error_rsp rsp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&inject_req, 0, sizeof(inject_req));
+ inject_req.block_id = info->head.block;
+ inject_req.subblock_id = info->head.sub_block_index;
+ inject_req.address = info->address;
+ inject_req.error_type = info->head.type;
+ inject_req.instance_mask = info->instance_mask;
+ inject_req.method = info->value;
+
+ return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
+ &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -1543,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_error_inject(adev, info);
+
/* inject on guest isn't allowed, return success directly */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1687,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count);
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
@@ -1733,7 +1881,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t ppos, size_t count)
{
struct amdgpu_ras *con =
@@ -1745,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
unsigned int end = div64_ul(ppos + count - 1, element_size);
ssize_t s = 0;
struct ras_badpage *bps = NULL;
- unsigned int bps_count = 0;
+ int bps_count = 0, i, status;
+ uint64_t address;
memset(buf, 0, count);
- if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ bps_count = end - start;
+ bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return 0;
+
+ memset(bps, 0, sizeof(*bps) * bps_count);
+
+ if (amdgpu_uniras_enabled(adev))
+ bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
+ else
+ bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
+
+ if (bps_count <= 0) {
+ kfree(bps);
return 0;
+ }
+
+ for (i = 0; i < bps_count; i++) {
+ address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
+ if (amdgpu_ras_check_critical_address(adev, address))
+ continue;
+
+ bps[i].size = AMDGPU_GPU_PAGE_SIZE;
+
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ address);
+ if (status == -EBUSY)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ else
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
- for (; start < end && start < bps_count; start++)
s += scnprintf(&buf[s], element_size + 1,
"0x%08x : 0x%08x : %1s\n",
- bps[start].bp,
- bps[start].size,
- amdgpu_ras_badpage_flags_str(bps[start].flags));
+ bps[i].bp,
+ bps[i].size,
+ amdgpu_ras_badpage_flags_str(bps[i].flags));
+ }
kfree(bps);
@@ -1773,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}
+static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
+ u32 *minor, u32 *rev)
+{
+ int i;
+
+ if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
+ return false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
+ *major = adev->ip_blocks[i].version->major;
+ *minor = adev->ip_blocks[i].version->minor;
+ *rev = adev->ip_blocks[i].version->rev;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, version_attr);
- return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+ u32 major, minor, rev;
+ ssize_t size = 0;
+
+ size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
+ con->eeprom_control.tbl_hdr.version);
+
+ if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
+ size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
+ major, minor, rev);
+
+ return size;
}
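
The version attribute now appends two lines into one sysfs page buffer by tracking the running offset. A user-space sketch of the same accumulate-and-append formatting (buffer size and version values are placeholders):

#include <stdio.h>

int main(void)
{
	char buf[256];
	size_t size = 0;

	size += snprintf(buf + size, sizeof(buf) - size,
			 "table version: 0x%x\n", 0x21000);
	/* The second line is only emitted when a RAS IP block reports a version. */
	size += snprintf(buf + size, sizeof(buf) - size,
			 "ras version: %u.%u.%u\n", 1, 0, 0);

	fputs(buf, stdout);
	printf("bytes written: %zu\n", size);
	return 0;
}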
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
@@ -1864,6 +2073,9 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (!obj || obj->attr_inuse)
return -EINVAL;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
+ return 0;
+
get_obj(obj);
snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
@@ -2065,8 +2277,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
/* debugfs end */
/* ras fs */
-static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
- amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
@@ -2088,7 +2300,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
&con->event_state_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
+ const struct bin_attribute *bin_attrs[] = {
NULL,
NULL,
};
@@ -2114,11 +2326,10 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
if (amdgpu_bad_page_threshold != 0) {
/* add bad_page_features entry */
- bin_attr_gpu_vram_bad_pages.private = NULL;
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
group.bin_attrs = bin_attrs;
- sysfs_bin_attr_init(bin_attrs[0]);
}
r = sysfs_create_group(&adev->dev->kobj, &group);
@@ -2158,7 +2369,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
- /**
+ /*
* If the current interrupt is caused by a non-fatal RAS error, skip
* check for fatal error. For fatal errors, FED status of all devices
* in XGMI hive gets set when the first device gets fatal error
@@ -2169,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
return;
+ if (amdgpu_uniras_enabled(adev)) {
+ amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
+ return;
+ }
+
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
@@ -2339,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_ih_data *data;
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = info->head.block;
+ memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
+
+ return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
+ }
+
obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
@@ -2533,54 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
}
}
-/* recovery begin */
-
-/* return 0 on success.
- * caller need free bps.
- */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count)
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = 0;
- int ret = 0, status;
+ int r = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data || data->count == 0) {
- *bps = NULL;
- ret = -EINVAL;
- goto out;
- }
+ if (start < data->count) {
+ for (i = start; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
- if (!*bps) {
- ret = -ENOMEM;
- goto out;
+ bps[r].bp = data->bps[i].retired_page;
+ r++;
+ if (r >= count)
+ break;
+ }
}
+ mutex_unlock(&con->recovery_lock);
- for (; i < data->count; i++) {
- (*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].retired_page,
- .size = AMDGPU_GPU_PAGE_SIZE,
- .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
- };
- status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
- if (status == -EBUSY)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (status == -ENOENT)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ return r;
+}
+
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
+{
+ struct ras_cmd_bad_pages_info_req cmd_input;
+ struct ras_cmd_bad_pages_info_rsp *output;
+ uint32_t group, start_group, end_group;
+ uint32_t pos, pos_in_group;
+ int r = 0, i;
+
+ if (!bps || !count)
+ return -EINVAL;
+
+ output = kmalloc(sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ memset(&cmd_input, 0, sizeof(cmd_input));
+
+ start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
+ RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+
+ pos = start;
+ for (group = start_group; group < end_group; group++) {
+ memset(output, 0, sizeof(*output));
+ cmd_input.group_index = group;
+ if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
+ &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
+ goto out;
+
+ if (pos >= output->bp_total_cnt)
+ goto out;
+
+ pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
+ if (!output->records[i].ts)
+ continue;
+
+ bps[r].bp = output->records[i].retired_page;
+ r++;
+ if (r >= count)
+ goto out;
+ }
}
- *count = data->count;
out:
- mutex_unlock(&con->recovery_lock);
- return ret;
+ kfree(output);
+ return r;
}
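
amdgpu_uniras_badpages_read above walks the RAS command interface in fixed-size groups and copies only the records that fall inside the caller's (start, count) window. A standalone sketch of that group/index arithmetic (the group size of 64 is a placeholder for RAS_CMD_MAX_BAD_PAGES_PER_GROUP):

#include <stdio.h>
#include <stdint.h>

#define PAGES_PER_GROUP 64	/* placeholder group size */

int main(void)
{
	uint32_t start = 100, count = 200;
	uint32_t start_group = start / PAGES_PER_GROUP;
	uint32_t end_group = (start + count + PAGES_PER_GROUP - 1) / PAGES_PER_GROUP;
	uint32_t pos = start, group;

	for (group = start_group; group < end_group; group++) {
		/* local index of the first wanted record inside this group */
		uint32_t pos_in_group = pos - group * PAGES_PER_GROUP;

		printf("group %u: copy from local index %u\n", group, pos_in_group);
		/* after a group is consumed, the next one starts at its base */
		pos = (group + 1) * PAGES_PER_GROUP;
	}
	return 0;
}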
static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
@@ -2629,6 +2884,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2657,11 +2913,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+			/* wait 500 ms to ensure PMFW has finished polling MCA bank info */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_query_err_status(remote_adev);
- amdgpu_ras_log_on_err_counter(remote_adev, type);
+ if (amdgpu_uniras_enabled(remote_adev)) {
+ amdgpu_ras_mgr_update_ras_ecc(remote_adev);
+ } else {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev, type);
+ }
}
}
@@ -2713,7 +2980,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2749,8 +3016,13 @@ static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
addr_in.ma.err_addr = bps->address;
addr_in.ma.socket_id = socket;
addr_in.ma.ch_inst = bps->mem_channel;
- /* tell RAS TA the node instance is not used */
- addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ } else {
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = bps->cu;
+ }
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
@@ -2796,20 +3068,157 @@ static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
return -EINVAL;
}
+static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int count)
+{
+ int j;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ for (j = 0; j < count; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
+ continue;
+ }
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ return -ENOMEM;
+ }
+
+ amdgpu_ras_reserve_page(adev, bps[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(bps[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ con->bad_page_num++;
+ }
+
+ return 0;
+}
+
+static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+
+	/* old ASICs only have PA records in EEPROM */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ goto out;
+ }
+
+ for (i = 0; i < adev->umc.retire_unit; i++)
+ bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps) {
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
+ return -EINVAL;
+ }
+ } else {
+ if (bps[0].address == 0) {
+			/* for some old EEPROM data the MCA address is not
+			 * stored, calculate it from the PA
+			 */
+ if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+ }
+
+ if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
+ if (nps == AMDGPU_NPS1_PARTITION_MODE)
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ else
+ return -EOPNOTSUPP;
+ }
+ }
+
+out:
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
+}
+
+static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ } else {
+		/* if PMFW manages the EEPROM, save_nps is not stored on the
+		 * EEPROM; always convert the MCA address into a physical
+		 * address by making save_nps different from nps
+		 */
+ save_nps = nps + 1;
+ }
+
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
+ } else {
+ if (bps->address) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ } else {
+			/* for some old EEPROM data the MCA address is not
+			 * stored, calculate it from the PA
+			 */
+ if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+
+ if (amdgpu_ras_mca2pa(adev, bps, err_data))
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
+ adev->umc.retire_unit);
+}
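
Both conversion helpers above strip an NPS tag that older tables pack into the high bits of retired_page before using the value as a page number, and compare it against the current partition mode. A minimal sketch of that encode/decode; the shift and mask are chosen for illustration only (the real UMC_NPS_SHIFT/UMC_NPS_MASK live in the UMC headers):

#include <stdio.h>
#include <stdint.h>

#define NPS_SHIFT 58			/* illustrative, not the driver's value */
#define NPS_MASK  0x3full

int main(void)
{
	uint64_t retired_page = 0x12345;
	uint64_t tagged = retired_page | ((uint64_t)2 << NPS_SHIFT);	/* tag with NPS2 */

	uint64_t save_nps = (tagged >> NPS_SHIFT) & NPS_MASK;		/* recover the tag */
	uint64_t page = tagged & ~(NPS_MASK << NPS_SHIFT);		/* strip it */

	printf("save_nps=%llu page=0x%llx\n",
	       (unsigned long long)save_nps, (unsigned long long)page);
	return 0;
}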
+
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
struct ras_err_data err_data;
- struct eeprom_table_record *err_rec;
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i, j, loop_cnt = 1;
- bool find_pages_per_pa = false;
+ uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2820,103 +3229,51 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record), GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
- err_rec = err_data.err_addr;
- loop_cnt = adev->umc.retire_unit;
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
}
mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data)
- goto free;
-
- for (i = 0; i < pages; i++) {
- if (from_rom &&
- control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
- if (!find_pages_per_pa) {
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- /* may use old RAS TA, use PA to find pages in
- * one row
- */
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- goto free;
- else
- find_pages_per_pa = true;
+
+ if (from_rom) {
+		/* there are no PA records in V3, so skip PA record processing */
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+					/* deal with retire_unit records at a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
} else {
- /* unsupported cases */
- goto free;
+ break;
}
- }
- } else {
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- goto free;
- }
- } else {
- if (from_rom && !find_pages_per_pa) {
- if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
- /* bad page in any NPS mode in eeprom */
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data))
- goto free;
} else {
- /* legacy bad page in eeprom, generated only in
- * NPS1 mode
- */
- if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
- /* old RAS TA or ASICs which don't support to
- * convert addrss via mca address
- */
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- find_pages_per_pa = true;
- err_rec = &bps[i];
- loop_cnt = 1;
- } else {
- /* non-nps1 mode, old RAS TA
- * can't support it
- */
- goto free;
- }
- }
+ break;
}
-
- if (!find_pages_per_pa)
- i += (adev->umc.retire_unit - 1);
- } else {
- err_rec = &bps[i];
}
}
-
- for (j = 0; j < loop_cnt; j++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
-
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
- goto free;
- }
-
- amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
-
- memcpy(&data->bps[data->count], &(err_rec[j]),
- sizeof(struct eeprom_table_record));
- data->count++;
- data->space_left--;
+ for (; i < pages; i++) {
+ ret = __amdgpu_ras_convert_rec_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
+ } else {
+ ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
-free:
if (from_rom)
kfree(err_data.err_addr);
-out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2933,7 +3290,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -2942,31 +3299,45 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ unit_num = control->ras_num_recs -
+ control->ras_num_recs_old;
+ else
+ unit_num = data->count / adev->umc.retire_unit -
+ control->ras_num_recs;
+
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ if (unit_num && save_count) {
+		/* old ASICs keep saving only PA records to EEPROM, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num + i * adev->umc.retire_unit],
- 1)) {
+ &data->bps[data->count_saved +
+ i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
@@ -2974,6 +3345,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -2988,7 +3360,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
- int ret;
+ int ret, i = 0;
/* no bad page record, skip eeprom access */
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
@@ -3002,26 +3374,44 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
- if (control->ras_num_recs > 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- if ((bps[0].address == bps[1].address) &&
- (bps[0].mem_channel == bps[1].mem_channel))
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
- else
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+			/* In V3 there are no PA records, and some cases (when
+			 * address == 0) may be parsed as PA records, so add a
+			 * version check to avoid it.
+			 */
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
+ } else {
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
+ break;
+ }
+ }
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
+ }
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3029,18 +3419,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
@@ -3048,11 +3444,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3069,31 +3465,29 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
/*
- * Justification of value bad_page_cnt_threshold in ras structure
- *
- * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
- * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
- * scenarios accordingly.
- *
- * Bad page retirement enablement:
- * - If amdgpu_bad_page_threshold = -2,
- * bad_page_cnt_threshold = typical value by formula.
- *
- * - When the value from user is 0 < amdgpu_bad_page_threshold <
- * max record length in eeprom, use it directly.
- *
- * Bad page retirement disablement:
- * - If amdgpu_bad_page_threshold = 0, bad page retirement
- * functionality is disabled, and bad_page_cnt_threshold will
- * take no effect.
+	 * amdgpu_bad_page_threshold is used to configure
+	 * the threshold for the number of bad pages.
+	 * -1:  Threshold is set to default value
+	 *      Driver will issue a warning message when threshold is reached
+	 *      and continue runtime services.
+	 * 0:   Disable bad page retirement
+	 *      Driver will not retire bad pages,
+	 *      which is intended for debugging purposes.
+	 * -2:  Threshold is determined by a formula
+	 *      that assumes 1 bad page per 100M of local memory.
+	 *      Driver will continue runtime services when the threshold is reached.
+	 * 0 < threshold < max number of bad page records in EEPROM:
+	 *      A user-defined threshold is set.
+	 *      Driver will halt runtime services when this custom threshold is reached.
*/
-
- if (amdgpu_bad_page_threshold < 0) {
+ if (amdgpu_bad_page_threshold == -2) {
u64 val = adev->gmc.mc_vram_size;
do_div(val, RAS_BAD_PAGE_COVER);
con->bad_page_cnt_threshold = min(lower_32_bits(val),
max_count);
+ } else if (amdgpu_bad_page_threshold == -1) {
+ con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
} else {
con->bad_page_cnt_threshold = min_t(int, max_count,
amdgpu_bad_page_threshold);
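
For reference, a small sketch of the three threshold computations described in the comment above. RAS_BAD_PAGE_COVER is assumed to be 100 MiB here, and the VRAM/reservation sizes are arbitrary examples:

#include <stdio.h>
#include <stdint.h>

#define RAS_BAD_PAGE_COVER (100ull << 20)	/* assumed: one bad page per 100M */

int main(void)
{
	uint64_t vram_size = 64ull << 30;	/* 64 GiB example */
	uint64_t reserved_bytes = 16ull << 20;	/* example reservation */

	/* amdgpu_bad_page_threshold == -2: formula based on VRAM size */
	uint32_t by_formula = vram_size / RAS_BAD_PAGE_COVER;

	/* amdgpu_bad_page_threshold == -1: derived from the reserved VRAM,
	 * mirroring the driver's (bytes >> 21) << 4 expression */
	uint32_t by_reservation = (reserved_bytes >> 21) << 4;

	printf("formula: %u pages, reservation: %u pages\n",
	       by_formula, by_reservation);
	return 0;
}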
@@ -3138,7 +3532,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3158,7 +3552,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3184,7 +3578,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3196,13 +3589,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3213,58 +3602,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
-
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ consumption_q_count = ecc_log->consumption_q_count;
- if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
- break;
- }
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ if (de_queried_count && consumption_q_count)
+ break;
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3300,6 +3670,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure poison creation handler is completed first
+ * to set rma if bad page exceed threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3309,8 +3685,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3340,6 +3714,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3352,7 +3727,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3369,6 +3745,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3393,9 +3770,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3410,23 +3790,28 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!con || amdgpu_sriov_vf(adev))
return 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
control = &con->eeprom_control;
+ con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
+
ret = amdgpu_ras_eeprom_init(control);
- if (ret)
- return ret;
+ control->is_eeprom_valid = !ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+ control->ras_num_pa_recs = control->ras_num_recs;
- /* default status is MCA storage */
- if (control->ras_num_recs <= 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
- if (control->ras_num_recs) {
+ if (control->ras_num_recs && control->is_eeprom_valid) {
ret = amdgpu_ras_load_bad_pages(adev);
- if (ret)
- return ret;
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
amdgpu_dpm_send_hbm_bad_pages_num(
adev, control->ras_num_bad_pages);
@@ -3436,9 +3821,16 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
adev, control->bad_channel_bitmap);
con->update_channel_flag = false;
}
+
+ /* The format action is only applied to new ASICs */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
+ control->tbl_hdr.version < RAS_TABLE_VER_V3)
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (amdgpu_ras_save_bad_pages(adev, NULL))
+ dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
- return ret;
+ return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
@@ -3469,8 +3861,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3488,6 +3882,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3560,6 +3955,10 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+#ifdef CONFIG_X86_MCE_AMD
+ amdgpu_unregister_bad_pages_mca_notifier(adev);
+#endif
return 0;
}
/* recovery end */
@@ -3647,7 +4046,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev
*/
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else
@@ -3748,8 +4148,13 @@ init_ras_enabled_flag:
adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
adev->ras_hw_enabled & amdgpu_ras_mask;
- /* aca is disabled by default */
- adev->aca.is_enabled = false;
+ /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -3777,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
atomic_set(&con->ras_ue_count, ue_count);
}
- pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
@@ -3837,8 +4241,10 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 12):
+ con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
+ break;
case IP_VERSION(13, 0, 14):
- con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
+ con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
break;
default:
break;
@@ -3978,6 +4384,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4257,6 +4669,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -4324,8 +4739,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
- if (ras)
+ if (ras) {
ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
}
void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
@@ -4373,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
struct ras_event_state *event_state;
int ret = 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
if (type >= RAS_EVENT_TYPE_COUNT) {
ret = -EINVAL;
goto out;
@@ -4423,17 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type
return id;
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
- u64 event_id;
+ u64 event_id = RAS_EVENT_INVALID_ID;
- if (amdgpu_ras_mark_ras_event(adev, type))
- return;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
- event_id = amdgpu_ras_acquire_event_id(adev, type);
+ if (!amdgpu_ras_mark_ras_event(adev, type))
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4442,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
+
+ return -EBUSY;
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
@@ -4569,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
notifier_registered = true;
}
}
+static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ if (!notifier_registered && !mce_adev_list.num_gpu)
+ return;
+ for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
+ if (mce_adev_list.devs[i] == adev)
+ mce_adev_list.devs[i] = NULL;
+ if (!mce_adev_list.devs[i])
+ ++j;
+ }
+
+ if (j == mce_adev_list.num_gpu) {
+ mce_adev_list.num_gpu = 0;
+ /* Unregister x86 notifier with MCE subsystem. */
+ if (notifier_registered) {
+ mce_unregister_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = false;
+ }
+ }
+}
#endif
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
@@ -5116,9 +5561,9 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
"socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
socket_id, aid_id, fw_status);
- if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
dev_info(adev->dev,
- "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
socket_id, aid_id, fw_status);
}
@@ -5160,6 +5605,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5191,8 +5639,110 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_is_rma(adev);
+
if (!con)
return false;
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo had been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_pre_reset(tmp_adev);
+ }
+}
+
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_post_reset(tmp_adev);
+ }
+}
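
The critical-region helpers added above keep a list of [start, start + size) VRAM ranges and do a linear membership test before a page is retired or exposed through sysfs. A simplified user-space sketch of that check (the list handling and types are reduced for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct region {
	uint64_t start;
	uint64_t size;
};

/* Linear scan over recorded regions, as in amdgpu_ras_check_critical_address() */
static bool in_critical_region(const struct region *r, int n, uint64_t addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (addr >= r[i].start && addr < r[i].start + r[i].size)
			return true;
	return false;
}

int main(void)
{
	struct region regions[] = { { 0x100000, 0x200000 } };

	printf("%d\n", in_critical_region(regions, 1, 0x180000));	/* inside */
	printf("%d\n", in_critical_region(regions, 1, 0x400000));	/* outside */
	return 0;
}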
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 82db986c36a0..ff44190d7d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -47,7 +47,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
-#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
+#define AMDGPU_RAS_GPU_ERR_GENERIC(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -65,7 +65,7 @@ struct amdgpu_iv_entry;
/* Reserve 8 physical dram row for possible retirement.
* In worst cases, it will lose 8 * 2MB memory in vram domain */
-#define AMDGPU_RAS_RESERVED_VRAM_SIZE (16ULL << 20)
+#define AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
/* The high three bits indicates socketid */
#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
@@ -98,6 +98,7 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__JPEG,
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MMSCH,
AMDGPU_RAS_BLOCK__LAST,
AMDGPU_RAS_BLOCK__ANY = -1
@@ -491,11 +492,45 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
+};
+
+struct ras_eeprom_table_version {
+ uint32_t minor : 16;
+ uint32_t major : 16;
+};
+
+struct ras_eeprom_smu_funcs {
+ int (*get_ras_table_version)(struct amdgpu_device *adev,
+ uint32_t *table_version);
+ int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout);
+ int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr);
+ int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp);
+ int (*get_timestamp)(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+ int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid);
+ int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result);
+};
+
+enum ras_smu_feature_flags {
+ RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0),
+};
+
+struct ras_smu_drv {
+ const struct ras_eeprom_smu_funcs *smu_eeprom_funcs;
+ void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags);
};
struct amdgpu_ras {
+ void *ras_mgr;
/* ras infrastructure */
/* for ras itself. */
uint32_t features;
@@ -514,6 +549,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -556,6 +592,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -569,6 +606,21 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
+
+ /* Disable/Enable uniras switch */
+ bool uniras_enabled;
+ const struct ras_smu_drv *ras_smu_drv;
};
struct ras_fs_data {
@@ -607,6 +659,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -795,6 +848,12 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__VCN;
case AMDGPU_RAS_BLOCK__JPEG:
return TA_RAS_BLOCK__JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return TA_RAS_BLOCK__IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return TA_RAS_BLOCK__MPIO;
+ case AMDGPU_RAS_BLOCK__MMSCH:
+ return TA_RAS_BLOCK__MMSCH;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
@@ -881,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void)
atomic_set(&amdgpu_ras_in_intr, 0);
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
@@ -966,6 +1025,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
@@ -977,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 52c16bfeccaa..64dd7a81bff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* These are memory addresses as would be seen by one or more EEPROM
* chips strung on the I2C bus, usually by manipulating pins 1-3 of a
@@ -123,6 +124,8 @@
RAS_TABLE_V2_1_INFO_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
+#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */
+
/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
* offset off of RAS_TABLE_START. That is, this is something you can
* add to control->i2c_address, and then tell I2C layer to read
@@ -161,6 +164,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
return true;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return (adev->gmc.is_app_apu) ? false : true;
default:
@@ -177,7 +181,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
if (!control)
return false;
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ if (adev->bios && amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
/* The address given by VBIOS is an 8-bit, wire-format
* address, i.e. the most significant byte.
*
@@ -223,6 +227,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
@@ -275,10 +280,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -321,7 +327,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -336,10 +343,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -413,9 +421,12 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(8, 10, 0):
- case IP_VERSION(12, 0, 0):
hdr->version = RAS_TABLE_VER_V2_1;
return;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ hdr->version = RAS_TABLE_VER_V3;
+ return;
default:
hdr->version = RAS_TABLE_VER_V1;
return;
@@ -435,42 +446,61 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 erase_res = 0;
u8 csum;
int res;
mutex_lock(&control->ras_tbl_mutex);
- hdr->header = RAS_TABLE_HDR_VAL;
- amdgpu_ras_set_eeprom_table_version(control);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ hdr->header = RAS_TABLE_HDR_VAL;
+ amdgpu_ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ /**
+ * GPU health represented as a percentage.
+		 * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
- if (hdr->version == RAS_TABLE_VER_V2_1) {
- hdr->first_rec_offset = RAS_RECORD_START_V2_1;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
- RAS_TABLE_V2_1_INFO_SIZE;
- rai->rma_status = GPU_HEALTH_USABLE;
- /**
- * GPU health represented as a percentage.
- * 0 means worst health, 100 means fully health.
- */
- rai->health_percent = 100;
- /* ecc_page_threshold = 0 means disable bad page retirement */
- rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
} else {
- hdr->first_rec_offset = RAS_RECORD_START;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res);
+ if (res || erase_res) {
+ dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d",
+ res, erase_res);
+ if (!res)
+ res = -EIO;
+ }
}
- csum = __calc_hdr_byte_sum(control);
- if (hdr->version == RAS_TABLE_VER_V2_1)
- csum += __calc_ras_info_byte_sum(control);
- csum = -csum;
- hdr->checksum = csum;
- res = __write_table_header(control);
- if (!res && hdr->version > RAS_TABLE_VER_V1)
- res = __write_table_ras_info(control);
-
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
@@ -546,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev);
+
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
@@ -558,16 +591,17 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
- if (amdgpu_bad_page_threshold == -1) {
+ if (con->eeprom_control.ras_num_bad_pages > con->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
dev_warn(adev->dev,
- "But GPU can be operated due to bad_page_threshold = -1.\n");
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
return false;
} else {
- dev_warn(adev->dev, "This GPU is in BAD status.");
- dev_warn(adev->dev, "Please retire it or set a larger "
- "threshold value when reloading driver.\n");
+ dev_warn(adev->dev,
+ "Please consider adjusting the customized threshold.\n");
return true;
}
}
@@ -601,13 +635,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -726,11 +760,13 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
- control->ras_fri)
% control->ras_max_record_count;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
+	/* old ASICs keep saving only PA records to EEPROM, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12)
+ control->ras_num_pa_recs += num;
else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_mca_recs += num;
+
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -748,24 +784,30 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
- if (amdgpu_bad_page_threshold != -1)
+ if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) &&
+ amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
+ }
/* ignore the -ENOTSUPP return value */
amdgpu_dpm_send_rma_reason(adev);
}
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -777,8 +819,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -790,12 +833,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -805,8 +847,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
* now calculate gpu health percent
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
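The health percentage computed above is plain integer arithmetic over values already read back from the header; a worked example with made-up numbers:

/* Hypothetical values for illustration only:
 *   threshold = 100, bad_pages = 37
 *   health_percent = ((100 - 37) * 100) / 100 = 63
 */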
@@ -818,7 +860,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
csum += *pp;
csum += __calc_hdr_byte_sum(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
csum += __calc_ras_info_byte_sum(control);
/* avoid sign extension when assigning to "checksum" */
csum = -csum;
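A hedged sketch of the checksum convention used here: the header stores the negated byte sum, so a verifier that adds up every byte of the table, checksum included, should end at zero modulo 256 (this mirrors what __verify_ras_table_checksum relies on, without reproducing its exact layout handling).

/* Illustrative only, assuming the stored checksum is the negated
 * 8-bit byte sum of the rest of the table.
 */
static bool example_table_csum_ok(const u8 *table, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += table[i];

	return sum == 0;
}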
@@ -831,6 +873,71 @@ Out:
return res;
}
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int ret, retry = 20;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_recs_old = control->ras_num_recs;
+
+ do {
+ /* 1000ms timeout is long enough, smu_get_badpage_count won't
+ * return -EBUSY before timeout.
+ */
+ ret = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS);
+ if (!ret &&
+ (control->ras_num_recs_old == control->ras_num_recs)) {
+ /* record number update in PMFW needs some time,
+ * smu_get_badpage_count may return immediately without
+ * count update, so sleep for a while and retry.
+ */
+ msleep(50);
+ retry--;
+ } else {
+ break;
+ }
+ } while (retry);
+
+ /* no update to the record count is not a real failure,
+ * so don't print a warning here
+ */
+ if (!ret && (control->ras_num_recs_old == control->ras_num_recs))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev) || !con)
+ return 0;
+
+ control->ras_num_bad_pages = con->bad_page_num;
+
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->ras_num_bad_pages > con->bad_page_cnt_threshold) {
+ dev_warn(adev->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ control->ras_num_bad_pages, con->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "failed to generate bad page threshold CPER records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2))
+ con->is_rma = true;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
* @control: pointer to control structure
@@ -850,22 +957,30 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res, i;
+ uint64_t nps = AMDGPU_NPS1_PARTITION_MODE;
if (!__is_ras_eeprom_supported(adev))
return 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_append(control);
+
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
/* set the new channel index flag */
for (i = 0; i < num; i++)
- record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+ record[i].retired_page |= (nps << UMC_NPS_SHIFT);
mutex_lock(&control->ras_tbl_mutex);
@@ -879,7 +994,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
/* clear channel index flag, the flag is only saved on eeprom */
for (i = 0; i < num; i++)
- record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+ record[i].retired_page &= ~(nps << UMC_NPS_SHIFT);
return res;
}
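A minimal sketch of the tag-then-strip pattern used around the EEPROM write above, treating UMC_NPS_SHIFT as an opaque bit position taken from the driver headers: the NPS mode is folded into spare high bits of the retired page address only for the on-EEPROM copy, then stripped so the in-memory record keeps the raw address.

/* Illustrative only; nps is the value queried above. */
static u64 example_tag_nps(u64 retired_page, u64 nps)
{
	return retired_page | (nps << UMC_NPS_SHIFT);	/* on-EEPROM form */
}

static u64 example_strip_nps(u64 tagged, u64 nps)
{
	return tagged & ~(nps << UMC_NPS_SHIFT);	/* in-memory form */
}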
@@ -910,13 +1025,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -925,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return res;
}
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint64_t ts, end_idx;
+ int i, ret;
+ u64 mca, ipid;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse)
+ return -EOPNOTSUPP;
+
+ end_idx = rec_idx + num;
+ for (i = rec_idx; i < end_idx; i++) {
+ ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts);
+ if (ret)
+ return ret;
+
+ record[i - rec_idx].address = mca;
+ /* retired_page (pa) is unused now */
+ record[i - rec_idx].retired_page = 0x1ULL;
+ record[i - rec_idx].ts = ts;
+ record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ adev->umc.ras->mca_ipid_parse(adev, ipid,
+ (uint32_t *)&(record[i - rec_idx].cu),
+ (uint32_t *)&(record[i - rec_idx].mem_channel),
+ (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL);
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_read -- read EEPROM
* @control: pointer to control structure
@@ -946,15 +1105,18 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
u8 *buf, *pp;
u32 g0, g1;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_eeprom_read_idx(control, record, 0, num);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1031,7 +1193,7 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
/* get available eeprom table version first before eeprom table init */
amdgpu_ras_set_eeprom_table_version(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
return RAS_MAX_RECORD_COUNT_V2_1;
else
return RAS_MAX_RECORD_COUNT;
@@ -1117,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
int res = -EFAULT;
size_t data_len;
+ /* pmfw manages eeprom data by itself */
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
mutex_lock(&control->ras_tbl_mutex);
/* We want *pos - data_len > 0, which means there's
@@ -1276,7 +1442,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
int buf_size, res;
u8 csum, *buf, *pp;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
buf_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -1286,7 +1452,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1295,7 +1462,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1320,7 +1487,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1332,7 +1500,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1344,6 +1513,42 @@ Out:
return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
}
+static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t local_time;
+ int res;
+
+ ras->is_rma = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+ mutex_init(&control->ras_tbl_mutex);
+
+ res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version));
+ if (res)
+ return res;
+
+ res = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), 100);
+ if (res)
+ return res;
+
+ local_time = (uint64_t)ktime_get_real_seconds();
+ res = amdgpu_ras_smu_set_timestamp(adev, local_time);
+ if (res)
+ return res;
+
+ control->ras_max_record_count = 4000;
+
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+
+ return 0;
+}
+
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1352,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_init(control);
+
ras->is_rma = false;
if (!__is_ras_eeprom_supported(adev))
@@ -1373,23 +1581,89 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u, max allowed: %u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+ return 0;
+}
+
+static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+ control->ras_num_bad_pages = ras->bad_page_num;
+
+ if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) &&
+ amdgpu_bad_page_threshold != 0) {
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ } else {
+ ras->is_rma = true;
+ dev_warn(adev->dev,
+ "User-defined threshold is set, runtime service will be halted when the threshold is reached\n");
+ }
+
+ return 0;
+ }
+
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
return 0;
}
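The 90% warning above avoids floating point by scaling both sides: 10 * bad_pages >= 9 * threshold is equivalent to bad_pages >= 0.9 * threshold for the integer ranges involved.

/* e.g. with an assumed threshold of 100:
 *   bad_pages = 90: 10 * 90 = 900 >= 9 * 100 = 900 -> warn at 90%
 *   bad_pages = 89: 10 * 89 = 890 <  900            -> no warning yet
 */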
@@ -1398,7 +1672,10 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- int res;
+ int res = 0;
+
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_check(control);
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1410,17 +1687,14 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
- else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_bad_pages);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
@@ -1428,8 +1702,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = __verify_ras_table_checksum(control);
if (res)
- DRM_ERROR("RAS table incorrect checksum or error:%d\n",
- res);
+ dev_err(adev->dev,
+ "RAS table incorrect checksum or error:%d\n",
+ res);
/* Warn if we are at 90% of the threshold or above
*/
@@ -1439,7 +1714,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
@@ -1447,11 +1722,12 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = __verify_ras_table_checksum(control);
if (res) {
- dev_err(adev->dev, "RAS Table incorrect checksum or error:%d\n",
- res);
+ dev_err(adev->dev,
+ "RAS Table incorrect checksum or error:%d\n",
+ res);
return -EINVAL;
}
- if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1466,24 +1742,189 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
- dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- if (amdgpu_bad_page_threshold == -1) {
- dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
res = 0;
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
} else {
ras->is_rma = true;
- dev_err(adev->dev,
- "RAS records:%d exceed threshold:%d, "
- "GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ dev_warn(adev->dev,
+ "User-defined threshold is set, runtime service will be halted when the threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras ||
+ amdgpu_ras_smu_eeprom_supported(adev))
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeeded\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+}
+
+static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!ras)
+ return NULL;
+
+ return ras->ras_smu_drv;
+}
+
+static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!ras_smu_drv)
+ goto out;
+
+ if (ras_smu_drv->ras_smu_feature_flags)
+ ras_smu_drv->ras_smu_feature_flags(adev, &flags);
+
+out:
+ return flags;
+}
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv)
+ return false;
+
+ if (!smu_ras_drv->smu_eeprom_funcs)
+ return false;
+
+ flags = amdgpu_ras_smu_get_feature_flags(adev);
+
+ return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM);
+}
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version)
+ return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev,
+ table_version);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev,
+ count, timeout);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev,
+ index, mca_addr);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->set_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev,
+ timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev,
+ index, timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev,
+ index, ipid);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table)
+ return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev,
+ result);
+ return -EOPNOTSUPP;
+}
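The amdgpu_ras_smu_* helpers above all share one shape: gate on SMU-managed EEPROM support, then forward to an optional callback in smu_eeprom_funcs, falling back to -EOPNOTSUPP. A hedged sketch of that shape follows; the callback name is a placeholder, not a real member of the ops table.

int amdgpu_ras_smu_example_op(struct amdgpu_device *adev, uint32_t *out)
{
	const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);

	if (!amdgpu_ras_smu_eeprom_supported(adev))
		return -EOPNOTSUPP;

	if (smu_ras_drv->smu_eeprom_funcs->example_op)	/* hypothetical member */
		return smu_ras_drv->smu_eeprom_funcs->example_op(adev, out);

	return -EOPNOTSUPP;
}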
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 81d55cb7b397..2e5d63957e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -28,6 +28,7 @@
#define RAS_TABLE_VER_V1 0x00010000
#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
struct amdgpu_device;
@@ -43,19 +44,6 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
-/*
- * one UMC MCA address could map to multiply physical address (PA),
- * such as 1:16, we use eeprom_table_record.address to store MCA
- * address and use eeprom_table_record.retired_page to save PA.
- *
- * AMDGPU_RAS_EEPROM_REC_PA: one record store one PA
- * AMDGPU_RAS_EEPROM_REC_MCA: one record store one MCA address
- */
-enum amdgpu_ras_eeprom_rec_type {
- AMDGPU_RAS_EEPROM_REC_PA,
- AMDGPU_RAS_EEPROM_REC_MCA,
-};
-
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -94,12 +82,19 @@ struct amdgpu_ras_eeprom_control {
/* Number of records in the table.
*/
u32 ras_num_recs;
+ u32 ras_num_recs_old;
/* the bad page number is ras_num_recs or
* ras_num_recs * umc.retire_unit
*/
u32 ras_num_bad_pages;
+ /* Number of records that store an MCA address */
+ u32 ras_num_mca_recs;
+
+ /* Number of records that store a physical address */
+ u32 ras_num_pa_recs;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -120,7 +115,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
- enum amdgpu_ras_eeprom_rec_type rec_type;
+
+ bool is_eeprom_valid;
};
/*
@@ -166,6 +162,37 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev);
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version);
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout);
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr);
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp);
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid);
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result);
+
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num);
+
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index dabfbdf6f1ce..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a6e28fe3f8d6..c596b6df2e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_ras_mgr.h"
#include "atom.h"
/*
@@ -99,6 +100,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * This doesn't check the max_dw limit since we may be re-emitting
+ * several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
/** amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -136,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->funcs->align_mask)
- ib->ptr[ib->length_dw++] = ring->funcs->nop;
+ u32 align_mask = ring->funcs->align_mask;
+ u32 count = ib->length_dw & align_mask;
+
+ if (count) {
+ count = align_mask + 1 - count;
+
+ memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
+
+ ib->length_dw += count;
+ }
}
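The padding math above fills to the next alignment boundary with a single memset32 instead of a per-dword loop; a worked example with an assumed align_mask of 7 (8-dword alignment):

/* length_dw = 13: count = 13 & 7 = 5, pad = 7 + 1 - 5 = 3 NOP dwords,
 * new length_dw = 16. If length_dw were already a multiple of 8,
 * count would be 0 and nothing is written.
 */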
/**
@@ -187,14 +219,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -243,57 +271,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -324,45 +337,51 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
- if (r) {
- dev_err(adev->dev, "failed initializing fences (%d).\n", r);
- return r;
- }
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+ if (r) {
+ dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+ return r;
+ }
- max_ibs_dw = ring->funcs->emit_frame_size +
- amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
- max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
- if (WARN_ON(max_ibs_dw > max_dw))
- max_dw = max_ibs_dw;
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
- ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ } else {
+ ring->ring_size = roundup_pow_of_two(max_dw * 4);
+ ring->count_dw = (ring->ring_size - 4) >> 2;
+ /* ring buffer is empty now */
+ ring->wptr = *ring->rptr_cpu_addr = 0;
+ }
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
+ /* Initialize cached_rptr to 0 */
+ ring->cached_rptr = 0;
- /* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
- } else if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ /* Allocate ring buffer */
+ if (ring->ring_obj == NULL) {
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -392,32 +411,26 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
-
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
}
/**
@@ -454,9 +467,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
-
- if (unlikely(ring->adev->debug_disable_soft_recovery))
- return false;
+ bool ret;
deadline = ktime_add_us(ktime_get(), 10000);
@@ -468,12 +479,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
@@ -481,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
*/
#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ const uint8_t ring_header_size = 12;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
+ struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
+ struct ras_cmd_cper_record_req *record_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
+ struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
+ uint8_t *ring_header __free(kfree) =
+ kzalloc(ring_header_size, GFP_KERNEL);
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ int r;
+
+ if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
+ !ring_header)
+ return -ENOMEM;
+
+ if (!(*offset)) {
+ /* Need at least 12 bytes for the header on the first read */
+ if (size < ring_header_size)
+ return -EINVAL;
+
+ if (copy_to_user(buf, ring_header, ring_header_size))
+ return -EFAULT;
+ buf += ring_header_size;
+ size -= ring_header_size;
+ }
+
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
+ snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
+ if (r || !snapshot_rsp->total_cper_num)
+ return r;
+
+ start_cper_id = snapshot_rsp->start_cper_id;
+ total_cper_num = snapshot_rsp->total_cper_num;
+
+ record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
+ record_req->buf_size = size;
+ record_req->cper_start_id = start_cper_id + *offset;
+ record_req->cper_num = total_cper_num;
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
+ record_req, sizeof(struct ras_cmd_cper_record_req),
+ record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
+ if (r)
+ return r;
+
+ r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
+ (*offset) += record_rsp->real_cper_num;
+
+ return r;
+}
+
/* Layout of file is 12 bytes consisting of
* - rptr
* - wptr
@@ -493,22 +568,31 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
uint32_t value, result, early[3];
+ uint64_t p;
loff_t i;
int r;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
+ return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);
+
if (*pos & 3 || size & 3)
return -EINVAL;
result = 0;
if (*pos < 12) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_lock(&ring->adev->cper.ring_lock);
+
early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
r = put_user(early[i], (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto out;
+ }
buf += 4;
result += 4;
size -= 4;
@@ -516,86 +600,94 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
}
}
- while (size) {
- if (*pos >= (ring->ring_size + 12))
- return result;
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
- value = ring->ring[(*pos - 12)/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+ } else {
+ p = early[0];
+ if (early[0] <= early[1])
+ size = (early[1] - early[0]);
+ else
+ size = ring->ring_size - (early[0] - early[1]);
+
+ while (size) {
+ if (p == early[1])
+ goto out;
+
+ value = ring->ring[p];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto out;
+ }
+
+ buf += 4;
+ result += 4;
+ size--;
+ p++;
+ p &= ring->ptr_mask;
+ }
}
+out:
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_unlock(&ring->adev->cper.ring_lock);
+
return result;
}
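The CPER branch above sizes the readable region from the masked rptr/wptr pair and handles wraparound; a hedged sketch of that occupancy calculation with illustrative numbers (a 16-entry buffer):

/* rptr = 14, wptr = 3  ->  wrapped: valid = 16 - (14 - 3) = 5 entries
 * rptr = 2,  wptr = 9  ->  linear:  valid = 9 - 2         = 7 entries
 */
static u32 example_ring_occupancy(u32 rptr, u32 wptr, u32 entries)
{
	return (wptr >= rptr) ? wptr - rptr : entries - (rptr - wptr);
}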
-static const struct file_operations amdgpu_debugfs_ring_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_ring_read,
- .llseek = default_llseek
-};
-
-static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
- volatile u32 *mqd;
- u32 *kbuf;
- int r, i;
- uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto err_free;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ amdgpu_virt_req_ras_cper_dump(ring->adev, false);
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r)
- goto err_unreserve;
+ return amdgpu_debugfs_ring_read(f, buf, size, pos);
+}
- /*
- * Copy to local buffer to avoid put_user(), which might fault
- * and acquire mmap_sem, under reservation_ww_class_mutex.
- */
- for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
- kbuf[i] = mqd[i];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
- amdgpu_bo_kunmap(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
+static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_virt_ring_read,
+ .llseek = default_llseek
+};
- result = 0;
- while (size) {
- if (*pos >= ring->mqd_size)
- break;
+static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
+ void *from = ((u8 *)ring->mqd_ptr) + *pos;
- value = kbuf[*pos/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- goto err_free;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
- }
+ if (*pos > ring->mqd_size)
+ return 0;
- kfree(kbuf);
- return result;
+ if (copy_to_user(buf, from, bytes))
+ return -EFAULT;
-err_unreserve:
- amdgpu_bo_unreserve(ring->mqd_obj);
-err_free:
- kfree(kbuf);
- return r;
+ *pos += bytes;
+ return bytes;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
@@ -626,9 +718,14 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
- debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
- &amdgpu_debugfs_ring_fops,
- ring->ring_size + 12);
+ if (amdgpu_sriov_vf(adev))
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_virt_ring_fops,
+ ring->ring_size + 12);
+ else
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
if (ring->mqd_obj) {
sprintf(name, "amdgpu_mqd_%s", ring->name);
@@ -690,6 +787,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
@@ -761,3 +859,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the guilty fence and set an error on all fences from the context */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
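A minimal sketch of how an IP-specific ring ->reset callback could be structured around the helpers added above; ip_specific_queue_reset() is a placeholder for whatever hardware-level reset the block actually implements, not an existing function.

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	int r;

	/* stop the scheduler and back up unprocessed, non-guilty commands */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = ip_specific_queue_reset(ring, vmid);	/* hypothetical */
	if (r)
		return r;

	/* test the ring, complete the guilty fence, re-emit, restart scheduler */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}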
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index dee5a1b4e572..7a27c6c4bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -37,7 +37,7 @@ struct amdgpu_job;
struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 124
+#define AMDGPU_MAX_RINGS 149
#define AMDGPU_MAX_HWIP_RINGS 64
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_SW_GFX_RINGS 2
@@ -82,6 +82,8 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
+ AMDGPU_RING_TYPE_CPER,
+ AMDGPU_RING_TYPE_MAX,
};
enum amdgpu_ib_pool_type {
@@ -113,10 +115,11 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -126,11 +129,33 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+};
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -140,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -163,13 +188,40 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every block in the amdgpu has no-op instructions (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc). This field receives the specific no-op for the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -235,10 +287,14 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
+/**
+ * struct amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -247,16 +303,67 @@ struct amdgpu_ring {
struct amdgpu_bo *ring_obj;
uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the ring buffer implementation and represents the
+ * write pointer. The wptr marks how far the host has written commands.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before wptr is updated with a new value, the old value is usually
+ * stored in wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of dwords for ring allocation. This information is
+ * provided at ring initialization time, and each IP block can
+ * specify its own value. Check places that invoke
+ * amdgpu_ring_init() to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of dwords supported by the
+ * ring and is updated as the ring is written to.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs provide support for 64-bit pointers and others for 32-bit
+ * only; this behavior is component-specific and defined by the field
+ * support_64bit_ptrs. If the IP block supports 64-bit pointers, the mask
+ * 0xffffffffffffffff is set; otherwise, this value falls back to buf_mask.
+ * Note that this field is used to keep wptr within a valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Buffer mask is used to keep the write index within the ring buffer.
+ * It is initialized when the ring buffer is created and is defined
+ * as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
@@ -274,19 +381,26 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
- volatile u32 *wptr_cpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * This is the CPU address pointer in the writeback slot. This is used
+ * to commit changes to the GPU.
+ */
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@@ -295,18 +409,15 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
-
+ /* store the cached rptr to restore after reset */
+ uint64_t cached_rptr;
};
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -336,7 +447,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -369,10 +480,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
@@ -431,15 +539,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -470,4 +569,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
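As a companion to the @buf_mask and @ptr_mask documentation above, a small sketch of the mask arithmetic with an assumed 4 KiB ring (1024 dwords); the names mirror the struct fields, the numbers are illustrative:

/* buf_mask = (4096 / 4) - 1 = 0x3ff;
 * ptr_mask = ~0ULL for 64-bit-pointer IPs, otherwise buf_mask.
 */
static void example_ring_write(u32 *ring, u64 *wptr, u32 buf_mask,
			       u64 ptr_mask, u32 value)
{
	ring[*wptr & buf_mask] = value;		/* index through buf_mask */
	*wptr = (*wptr + 1) & ptr_mask;		/* wptr wraps through ptr_mask */
}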
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 1c66da1c3fb4..7e7d6c3865bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -124,7 +124,7 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
}
}
- del_timer(&mux->resubmit_timer);
+ timer_delete(&mux->resubmit_timer);
mux->s_resubmit = false;
}
@@ -135,7 +135,8 @@ static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
- struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
+ struct amdgpu_ring_mux *mux = timer_container_of(mux, t,
+ resubmit_timer);
if (!spin_trylock(&mux->lock)) {
amdgpu_ring_mux_schedule_resubmit(mux);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,7 +237,21 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+	 * The adev parameter is used to get the CS data and other gfx info,
+	 * and buffer is the RLC CS pointer.
+	 *
+	 * Sometimes user space requests a clear state through the command
+	 * buffer; this function provides the clear state that is then put
+	 * into the hardware. Note that the driver programs the Clear State
+	 * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+	 * and it also provides a pointer to it, which the firmware uses to
+	 * load the clear state in some cases.
+ */
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -261,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
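
The @get_csb_buffer comment above describes a callback that is always paired with get_csb_size(): callers size the clear-state buffer first and then ask the callback to fill exactly that many dwords. Below is a minimal sketch of that size-then-fill contract using a generic ops table; all names and values here are illustrative, not the driver's real implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_ctx { uint32_t csb_dwords; };

/* Illustrative ops table mirroring the size/fill pairing. */
struct csb_ops {
	uint32_t (*get_csb_size)(struct dev_ctx *dev);
	void (*get_csb_buffer)(struct dev_ctx *dev, uint32_t *buffer);
};

static uint32_t demo_get_csb_size(struct dev_ctx *dev)
{
	return dev->csb_dwords;
}

static void demo_get_csb_buffer(struct dev_ctx *dev, uint32_t *buffer)
{
	uint32_t i;

	for (i = 0; i < dev->csb_dwords; i++)
		buffer[i] = 0xC0000000 | i;   /* placeholder clear-state words */
}

static const struct csb_ops demo_ops = {
	.get_csb_size = demo_get_csb_size,
	.get_csb_buffer = demo_get_csb_buffer,
};

int main(void)
{
	struct dev_ctx dev = { .csb_dwords = 8 };
	uint32_t size = demo_ops.get_csb_size(&dev);
	uint32_t *buf = calloc(size, sizeof(*buf));

	if (!buf)
		return 1;

	demo_ops.get_csb_buffer(&dev, buf);   /* fill exactly what was sized */
	printf("csb[0]=0x%08x csb[%u]=0x%08x\n", buf[0], size - 1, buf[size - 1]);
	free(buf);
	return 0;
}
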
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 632295bf3875..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -25,6 +25,9 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -75,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -355,23 +350,44 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
- mask = (1 << adev->sdma.num_instances) - 1;
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ /* Calculate the maximum possible mask value
+ * based on the number of SDMA instances and rings
+ */
+ mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;
+
if ((val & mask) == 0)
return -EINVAL;
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
- if (val & (1 << i))
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+ if (val & BIT_ULL(i * num_ring))
ring->sched.ready = true;
else
ring->sched.ready = false;
+
+ if (page) {
+ if (val & BIT_ULL(i * num_ring + 1))
+ page->sched.ready = true;
+ else
+ page->sched.ready = false;
+ }
}
/* publish sched.ready flag update effective immediately across smp */
smp_rmb();
@@ -381,16 +397,37 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= BIT_ULL(i * num_ring);
+ else
+ mask &= ~BIT_ULL(i * num_ring);
+
+ if (page) {
+ if (page->sched.ready)
+ mask |= BIT_ULL(i * num_ring + 1);
+ else
+ mask &= ~BIT_ULL(i * num_ring + 1);
+ }
}
*val = mask;
@@ -460,3 +497,115 @@ void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
}
}
+
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->sdma.has_page_queue &&
+ (ring->me < adev->sdma.num_instances) &&
+ (ring == &adev->sdma.instance[ring->me].ring))
+ return &adev->sdma.instance[ring->me].page;
+ else
+ return NULL;
+}
+
+/**
+ * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
+ * @adev: Pointer to the AMDGPU device structure
+ * @ring: Pointer to the ring structure to check
+ *
+ * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
+ * It returns true if the ring is such an SDMA ring, false otherwise.
+ */
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ int i = ring->me;
+
+ if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
+ return false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ return (ring == &adev->sdma.instance[i].page);
+ else
+ return false;
+}
+
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
+{
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing; the caller
+ * will handle it.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
+{
+ int ret = 0;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
+ struct amdgpu_ring *page_ring = &sdma_instance->page;
+
+ mutex_lock(&sdma_instance->engine_reset_mutex);
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&gfx_ring->sched);
+
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
+ }
+
+ if (sdma_instance->funcs->stop_kernel_queue) {
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
+
+ /* Perform the SDMA reset for the specified instance */
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
+ goto exit;
+ }
+
+ if (sdma_instance->funcs->start_kernel_queue) {
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
+
+exit:
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
+ }
+ }
+ mutex_unlock(&sdma_instance->engine_reset_mutex);
+
+ return ret;
+}
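
The reworked debugfs sched-mask handlers above pack two bits per SDMA instance when a page queue exists: bit (i * num_ring) for the gfx ring and bit (i * num_ring + 1) for the page ring. A small stand-alone decoder, assuming a hypothetical device with four instances and page queues, shows how a written value maps onto the per-ring ready flags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	const unsigned int num_instances = 4;   /* hypothetical device */
	const bool has_page_queue = true;
	const unsigned int num_ring = has_page_queue ? 2 : 1;
	const uint64_t valid_mask = BIT_ULL(num_instances * num_ring) - 1;
	unsigned int i;

	/* Example value: gfx+page on instances 0 and 2, gfx only on instance 1. */
	uint64_t val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(4) | BIT_ULL(5);

	if ((val & valid_mask) == 0)
		return 1;   /* mirrors the -EINVAL check in the handler */

	for (i = 0; i < num_instances; i++) {
		bool gfx_ready = val & BIT_ULL(i * num_ring);
		bool page_ready = has_page_queue && (val & BIT_ULL(i * num_ring + 1));

		printf("sdma%u: gfx=%d page=%d\n", i, gfx_ready, page_ready);
	}
	return 0;
}
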
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 2db58b5812a8..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -64,6 +70,11 @@ struct amdgpu_sdma_instance {
struct amdgpu_bo *sdma_fw_obj;
uint64_t sdma_fw_gpu_addr;
uint32_t *sdma_fw_ptr;
+ struct mutex engine_reset_mutex;
+ /* track guilty state of GFX and PAGE queues */
+ bool gfx_guilty;
+ bool page_guilty;
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -102,11 +113,13 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src fence_irq;
struct amdgpu_irq_src ecc_irq;
struct amdgpu_irq_src vm_hole_irq;
struct amdgpu_irq_src doorbell_invalid_irq;
struct amdgpu_irq_src pool_timeout_irq;
struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
int num_instances;
uint32_t sdma_mask;
@@ -117,6 +130,9 @@ struct amdgpu_sdma {
struct amdgpu_sdma_ras *ras;
uint32_t *ip_dump;
uint32_t supported_reset;
+ struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -156,6 +172,9 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
+
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
@@ -179,4 +198,7 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 41ebe690eeff..3739be1b71e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
dev_err(adev->dev, "Invalid input: %s\n", str);
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
/**
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -133,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
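
amdgpu_seq64_alloc() above derives three addresses from a single bitmap slot: a VA, a GPU address, and a CPU pointer, each offset by bit_pos * sizeof(u64) from its base. The sketch below reproduces that arithmetic with made-up base values; the constants and the tiny bitmap are placeholders for the driver's reserved VA range and BO addresses.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 64

static uint64_t used;   /* tiny stand-in for the slot bitmap */

static int find_free_slot(void)
{
	int i;

	for (i = 0; i < SLOTS; i++)
		if (!(used & (1ULL << i)))
			return i;
	return -1;
}

int main(void)
{
	/* Placeholder bases; the driver uses its reserved VA range and the
	 * seq64 BO's GPU address / kernel mapping instead.
	 */
	const uint64_t va_base  = 0x0000800000000000ULL;
	const uint64_t gpu_base = 0x0000000012340000ULL;
	static uint64_t cpu_base[SLOTS];
	int bit_pos = find_free_slot();
	uint64_t va, gpu;
	uint64_t *cpu;

	if (bit_pos < 0)
		return 1;                     /* mirrors -ENOSPC */
	used |= 1ULL << bit_pos;

	va  = va_base  + bit_pos * sizeof(uint64_t);
	gpu = gpu_base + bit_pos * sizeof(uint64_t);
	cpu = &cpu_base[bit_pos];

	printf("slot %d: va=0x%" PRIx64 " gpu=0x%" PRIx64 " cpu=%p\n",
	       bit_pos, va, gpu, (void *)cpu);
	return 0;
}
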
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586ab4c911b..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -135,11 +135,16 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
+ if (dma_fence_is_signaled(e->fence)) {
+ dma_fence_put(e->fence);
+ e->fence = dma_fence_get(f);
+ return true;
+ }
- amdgpu_sync_keep_later(&e->fence, f);
- return true;
+ if (likely(e->fence->context == f->context)) {
+ amdgpu_sync_keep_later(&e->fence, f);
+ return true;
+ }
}
return false;
}
@@ -149,10 +154,12 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
* @sync: sync object to add fence to
* @f: fence to sync to
+ * @flags: memory allocation flags to use when allocating sync entry
*
* Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags)
{
struct amdgpu_sync_entry *e;
@@ -162,7 +169,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
if (amdgpu_sync_add_later(sync, f))
return 0;
- e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, flags);
if (!e)
return -ENOMEM;
@@ -242,14 +249,13 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
dma_fence_put(f);
if (r)
return r;
@@ -281,7 +287,7 @@ int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
continue;
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
if (r)
break;
}
@@ -388,7 +394,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(clone, f);
+ r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
if (r)
return r;
} else {
@@ -400,6 +406,25 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
}
/**
+ * amdgpu_sync_move - move all fences from src to dst
+ *
+ * @src: source of the fences, empty after function
+ * @dst: destination for the fences
+ *
+ * Moves all fences from source to destination. All fences in destination are
+ * freed and source is empty after the function call.
+ */
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst)
+{
+ unsigned int i;
+
+ amdgpu_sync_free(dst);
+
+ for (i = 0; i < HASH_SIZE(src->fences); ++i)
+ hlist_move_list(&src->fences[i], &dst->fences[i]);
+}
+
+/**
* amdgpu_sync_push_to_job - push fences into job
* @sync: sync object to get the fences from
* @job: job to push the fences into
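
amdgpu_sync_move() above frees whatever the destination already holds and then splices every hash bucket list from the source into the destination, leaving the source empty. A compact user-space analogue with singly linked buckets, where move_buckets() stands in for the hlist_move_list() loop, illustrates the ownership transfer.

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 4

struct entry { int key; struct entry *next; };
struct table { struct entry *bucket[NBUCKETS]; };

static void table_add(struct table *t, int key)
{
	struct entry *e = malloc(sizeof(*e));

	e->key = key;
	e->next = t->bucket[key % NBUCKETS];
	t->bucket[key % NBUCKETS] = e;
}

static void table_free(struct table *t)
{
	int i;

	for (i = 0; i < NBUCKETS; i++) {
		while (t->bucket[i]) {
			struct entry *e = t->bucket[i];

			t->bucket[i] = e->next;
			free(e);
		}
	}
}

/* Stand-in for the free-then-splice pattern: dst is emptied, then every
 * bucket head is moved from src to dst, leaving src empty.
 */
static void move_buckets(struct table *src, struct table *dst)
{
	int i;

	table_free(dst);
	for (i = 0; i < NBUCKETS; i++) {
		dst->bucket[i] = src->bucket[i];
		src->bucket[i] = NULL;
	}
}

int main(void)
{
	struct table src = {0}, dst = {0};

	table_add(&src, 1);
	table_add(&src, 5);
	table_add(&dst, 7);          /* will be freed by the move */

	move_buckets(&src, &dst);
	printf("src bucket1 empty: %d, dst bucket1 head key: %d\n",
	       src.bucket[1] == NULL, dst.bucket[1]->key);
	table_free(&dst);
	return 0;
}
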
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index e3272dce798d..51eb4382c91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -47,7 +47,8 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
@@ -56,6 +57,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst);
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 383fce40d4dd..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -457,6 +453,38 @@ DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
TP_ARGS(pasid)
);
+TRACE_EVENT(amdgpu_isolation,
+ TP_PROTO(void *prev, void *next),
+ TP_ARGS(prev, next),
+ TP_STRUCT__entry(
+ __field(void *, prev)
+ __field(void *, next)
+ ),
+
+ TP_fast_assign(
+ __entry->prev = prev;
+ __entry->next = next;
+ ),
+ TP_printk("prev=%p, next=%p",
+ __entry->prev,
+ __entry->next)
+);
+
+TRACE_EVENT(amdgpu_cleaner_shader,
+ TP_PROTO(struct amdgpu_ring *ring, struct dma_fence *fence),
+ TP_ARGS(ring, fence),
+ TP_STRUCT__entry(
+ __string(ring, ring->name)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ring);
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("ring=%s, seqno=%Lu", __get_str(ring), __entry->seqno)
+);
+
TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo),
@@ -519,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2d85f1ec3041..2b931e855abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -187,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
- unsigned int i;
int r;
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
@@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -253,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
} else {
- dma_addr_t dma_address;
-
- dma_address = mm_cur->start;
- dma_address += adev->vm_manager.vram_base_offset;
+ u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
- for (i = 0; i < num_pages; ++i) {
- amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
- flags, cpu_addr);
- dma_address += PAGE_SIZE;
- }
+ amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
}
dma_fence_put(amdgpu_job_submit(job));
@@ -284,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
* move and different for a BO to BO copy.
*
*/
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f)
+__attribute__((nonnull))
+static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
@@ -299,7 +294,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -309,7 +305,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +336,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
@@ -358,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
- if (f)
- *f = dma_fence_get(fence);
- dma_fence_put(fence);
+ *f = fence;
return r;
}
@@ -401,7 +399,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -442,7 +440,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -533,10 +532,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -624,6 +625,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -641,6 +648,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -688,10 +697,11 @@ struct amdgpu_ttm_tt {
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that the range points to valid memory and that it is freed as well.
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -701,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
@@ -727,7 +734,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -738,38 +745,6 @@ out_unlock:
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
@@ -779,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
/*
@@ -930,7 +905,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1056,7 +1031,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1350,10 +1325,11 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching == ttm_cached)
+ if (ttm && ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@@ -1505,10 +1481,12 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
+ mutex_lock(&adev->mman.gtt_window_lock);
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
src_mm.start;
@@ -1523,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job);
+ mutex_unlock(&adev->mman.gtt_window_lock);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1777,25 +1756,21 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- if (!adev->gmc.is_app_apu) {
- ret = amdgpu_bo_create_kernel_at(
- adev, adev->gmc.real_vram_size - reserve_size,
- reserve_size, &adev->mman.fw_reserved_memory, NULL);
- if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
- NULL, NULL);
- return ret;
- }
- } else {
- DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
+ &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ return ret;
}
return 0;
@@ -1817,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
adev->gmc.mem_partitions[i].numa.node,
- false, false);
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
}
return 0;
}
@@ -1836,6 +1811,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1857,25 +1885,31 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
- adev->need_swiotlb,
- dma_addressing_limited(adev->dev));
+ (adev->need_swiotlb ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (dma_addressing_limited(adev->dev) ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -1904,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- *The reserved vram for driver must be pinned to the specified
- *place on the VRAM, so reserve it early.
+ * The reserved VRAM for the driver must be pinned to a specific
+ * location in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
- * only NAVI10 and onwards ASIC support for IP discovery.
- * If IP discovery enabled, a block of memory should be
- * reserved for IP discovey.
+ * only NAVI10 and later ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for it.
*/
- if (adev->mman.discovery_bin) {
+ if (adev->discovery.reserve_tmr) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -1954,63 +1988,89 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
* or whatever the user passed on module init.
*/
- if (amdgpu_gtt_size == -1)
- gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
- else
- gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ if (amdgpu_gtt_size != -1) {
+ uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
+
+ drm_warn(&adev->ddev,
+ "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
+ if (gtt_size != configured_size)
+ drm_warn(&adev->ddev,
+ "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
+ configured_size, gtt_size);
+
+ gtt_size = configured_size;
+ }
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
+ if (adev->flags & AMD_IS_APU) {
+ if (adev->gmc.real_vram_size < gtt_size)
+ adev->apu_prefer_gtt = true;
+ }
+
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a boorbell page for kernel usages */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
+ return r;
+ }
+
+ /* Initialize MMIO-remap pool (single page 4K) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
return r;
}
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2042,12 +2102,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2060,15 +2124,20 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2100,8 +2169,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2109,15 +2179,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
- dma_fence_put(man->move);
- man->move = NULL;
+ /* Drop all the old fences since re-creating the scheduler entities
+ * will allocate new contexts.
+ */
+ ttm_resource_manager_cleanup(man);
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
@@ -2140,7 +2213,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2150,7 +2223,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2181,7 +2254,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2189,7 +2263,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2216,7 +2291,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2224,7 +2299,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2237,7 +2313,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2276,7 +2352,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
@@ -2307,7 +2383,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2326,7 +2403,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2335,7 +2413,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2355,7 +2434,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
@@ -2395,7 +2474,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
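
The GTT sizing change in amdgpu_ttm_init() above now starts from the TTM pages limit and only overrides it, with a deprecation warning, when the amdgpu_gtt_size module parameter is set. The snippet below reproduces that decision with placeholder values for the pages limit and the parameter.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Placeholders: the kernel gets these from ttm_tt_pages_limit()
	 * and from the amdgpu.gttsize module parameter (-1 = unset).
	 */
	uint64_t ttm_pages_limit = 2 * 1024 * 1024;   /* pages */
	long long amdgpu_gtt_size = 4096;             /* MiB, or -1 */

	uint64_t gtt_size = ttm_pages_limit << PAGE_SHIFT;

	if (amdgpu_gtt_size != -1) {
		uint64_t configured = (uint64_t)amdgpu_gtt_size << 20;

		printf("warning: gttsize is deprecated, use ttm.pages_limit\n");
		if (configured != gtt_size)
			printf("warning: GTT %" PRIu64 " vs TTM %" PRIu64 " bytes\n",
			       configured, gtt_size);
		gtt_size = configured;
	}

	printf("GTT size: %" PRIu64 " MiB\n", gtt_size / (1024 * 1024));
	return 0;
}
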
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 461fb8090ae0..577ee04ce0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -28,13 +28,15 @@
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -81,11 +83,9 @@ struct amdgpu_mman {
uint64_t stolen_reserved_offset;
uint64_t stolen_reserved_size;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
@@ -152,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
@@ -165,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
@@ -178,38 +175,25 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
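
The new WRITE_COMPRESS_DISABLE field follows the same shift/mask packing scheme as the other copy flags, with AMDGPU_COPY_FLAGS_SET() masking a value and shifting it into place. The snippet below re-creates just enough of those macros to show the packing; the DATA_FORMAT and WRITE_COMPRESS_DISABLE shift/mask values are taken from the header excerpt, everything else is a simplified local copy.

#include <stdint.h>
#include <stdio.h>

/* Shift/mask values as they appear in the header excerpt above. */
#define COPY_FLAGS_DATA_FORMAT_SHIFT             8
#define COPY_FLAGS_DATA_FORMAT_MASK              0x3f
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT  14
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK   0x1

/* Simplified local copies of the SET/GET packing macros. */
#define COPY_FLAGS_SET(field, value) \
	(((uint32_t)(value) & COPY_FLAGS_##field##_MASK) << COPY_FLAGS_##field##_SHIFT)
#define COPY_FLAGS_GET(flags, field) \
	(((flags) >> COPY_FLAGS_##field##_SHIFT) & COPY_FLAGS_##field##_MASK)

int main(void)
{
	uint32_t flags = COPY_FLAGS_SET(DATA_FORMAT, 0x2a) |
			 COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE, 1);

	printf("flags=0x%08x data_format=0x%02x write_compress_disable=%u\n",
	       flags, COPY_FLAGS_GET(flags, DATA_FORMAT),
	       COPY_FLAGS_GET(flags, WRITE_COMPRESS_DISABLE));
	return 0;
}
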
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cf700824b960..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,11 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -765,8 +770,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -779,8 +786,9 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
- &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
- &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
@@ -1152,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1216,6 +1227,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(11, 0, 13):
return "beige_goby";
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
return "vangogh";
case IP_VERSION(12, 0, 1):
return "green_sardine";
@@ -1383,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
+
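As a rough illustration of the lookup pattern used by amdgpu_is_kicker_fw() above, which matches the PCI device ID and revision against a small static table, the following stand-alone C sketch reproduces the idea; the table contents, the is_kicker() name and the main() driver are illustrative only and are not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct kicker_entry {
	unsigned short device;
	unsigned char revision;
};

/* Illustrative entries only; the real data lives in kicker_device_list[]. */
static const struct kicker_entry table[] = {
	{ 0x744B, 0x00 },
	{ 0x7551, 0xC8 },
};

static bool is_kicker(unsigned short device, unsigned char revision)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].device == device && table[i].revision == revision)
			return true;
	}
	return false;
}

int main(void)
{
	printf("%d\n", is_kicker(0x744B, 0x00));	/* 1: listed device/revision pair */
	printf("%d\n", is_kicker(0x744B, 0xC8));	/* 0: revision does not match */
	return 0;
}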
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4eedd92f000b..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -600,6 +602,12 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
+};
+
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
@@ -629,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index eafe20d8fe0b..3f0b0e9af4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -24,6 +24,7 @@
#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
+#include "amdgpu_ras_mgr.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
#define MAX_UMC_HASH_STRING_SIZE 256
@@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = &con->eeprom_control;
unsigned int error_query_mode;
int ret = 0;
unsigned long err_count;
amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+	 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
mutex_lock(&con->page_retirement_lock);
- ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP &&
- error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+				 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev,
+ ras_error_status);
+ }
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+				 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ecc_info_query_ras_error_address(adev,
+ ras_error_status);
+ }
}
- } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
- (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_count)
- adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
+ } else {
+ if (!amdgpu_ras_eeprom_update_record_num(control)) {
+ err_data->err_addr_cnt = err_data->de_count =
+ control->ras_num_recs - control->ras_num_recs_old;
+ amdgpu_ras_eeprom_read_idx(control, err_data->err_addr,
+ control->ras_num_recs_old, err_data->de_count);
}
}
@@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt, false);
+ err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev));
amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev,
@@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
}
amdgpu_ras_error_data_fini(&err_data);
+ } else if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info = {0};
+
+ ih_info.block = block;
+ ih_info.pasid = pasid;
+ ih_info.reset = reset;
+ ih_info.pasid_fn = pasid_fn;
+ ih_info.data = data;
+ amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
} else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ret;
@@ -252,6 +291,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
@@ -387,6 +427,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
return 0;
}
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+ void *data)
+{
+ uint32_t umc_node_inst;
+ uint32_t node_inst;
+ uint32_t umc_inst;
+ uint32_t ch_inst;
+ int ret;
+
+ /*
+ * This loop is done based on the following -
+ * umc.active mask = mask of active umc instances across all nodes
+	 * umc.umc_inst_num = maximum number of umc instances per node
+ * umc.node_inst_num = maximum number of node instances
+ * Channel instances are not assumed to be harvested.
+ */
+	dev_dbg(adev->dev, "active umcs: %lx umc_inst per node: %d",
+ adev->umc.active_mask, adev->umc.umc_inst_num);
+ for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+ adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+ node_inst = umc_node_inst / adev->umc.umc_inst_num;
+ umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+ LOOP_UMC_CH_INST(ch_inst) {
+ dev_dbg(adev->dev,
+				"node_inst: %d umc_inst: %d ch_inst: %d",
+ node_inst, umc_inst, ch_inst);
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev,
+ "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
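The comment in amdgpu_umc_loop_all_aid() above explains how one bit position in umc.active_mask encodes both a node instance and a UMC instance. A short stand-alone sketch of that decomposition, assuming umc_inst_num = 4 purely for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned int umc_inst_num = 4;	/* assumed UMC instances per node */
	unsigned int bit;

	/* e.g. bit 9 set in active_mask maps to node 2, umc 1 */
	for (bit = 8; bit < 12; bit++)
		printf("bit %u -> node %u umc %u\n",
		       bit, bit / umc_inst_num, bit % umc_inst_num);
	return 0;
}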
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data)
{
@@ -395,6 +474,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
uint32_t ch_inst = 0;
int ret = 0;
+ if (adev->aid_mask)
+ return amdgpu_umc_loop_all_aid(adev, func, data);
+
if (adev->umc.node_inst_num) {
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
ret = func(adev, node_inst, umc_inst, ch_inst, data);
@@ -487,6 +569,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);
@@ -519,3 +602,26 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
return 0;
}
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+ int ret;
+
+	/* nps: the memory partition the pa belongs to */
+ addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+ addr_in.addr_type = TA_RAS_PA_TO_MCA;
+ ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
+ pa);
+
+ return ret;
+ }
+
+ *mca = addr_out.ma.err_addr;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a4a7e61817aa..28dff750c47e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -71,6 +71,25 @@
*/
#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
+/*
+ * save nps value to eeprom_table_record.retired_page[47:40],
+ * the channel index flag above will be retired.
+ */
+#define UMC_NPS_SHIFT 40
+#define UMC_NPS_MASK 0xffULL
+
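A minimal sketch of how an NPS value could be stored in and read back from retired_page bits [47:40] with the UMC_NPS_SHIFT/UMC_NPS_MASK definitions below; the retired_page and nps values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK  0xffULL

int main(void)
{
	uint64_t retired_page = 0x1234ULL;	/* illustrative page value */
	uint64_t nps = 2;			/* illustrative NPS mode */

	retired_page |= (nps & UMC_NPS_MASK) << UMC_NPS_SHIFT;
	printf("stored:   0x%llx\n", (unsigned long long)retired_page);
	printf("nps back: %llu\n",
	       (unsigned long long)((retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK));
	return 0;
}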
+/* three column bits and one row bit in the MCA address are flipped
+ * during bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -93,6 +112,9 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
+ void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid);
};
struct amdgpu_umc_funcs {
@@ -123,6 +145,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
@@ -165,4 +191,6 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
uint64_t err_addr, uint32_t ch, uint32_t umc,
uint32_t node, uint32_t socket,
struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index dde15c6a96e1..cd707d70a0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -32,462 +32,7 @@
#include "amdgpu_umsch_mm.h"
#include "umsch_mm_v4_0.h"
-struct umsch_mm_test_ctx_data {
- uint8_t process_csa[PAGE_SIZE];
- uint8_t vpe_ctx_csa[PAGE_SIZE];
- uint8_t vcn_ctx_csa[PAGE_SIZE];
-};
-
-struct umsch_mm_test_mqd_data {
- uint8_t vpe_mqd[PAGE_SIZE];
- uint8_t vcn_mqd[PAGE_SIZE];
-};
-
-struct umsch_mm_test_ring_data {
- uint8_t vpe_ring[PAGE_SIZE];
- uint8_t vpe_ib[PAGE_SIZE];
- uint8_t vcn_ring[PAGE_SIZE];
- uint8_t vcn_ib[PAGE_SIZE];
-};
-
-struct umsch_mm_test_queue_info {
- uint64_t mqd_addr;
- uint64_t csa_addr;
- uint32_t doorbell_offset_0;
- uint32_t doorbell_offset_1;
- enum UMSCH_SWIP_ENGINE_TYPE engine;
-};
-
-struct umsch_mm_test {
- struct amdgpu_bo *ctx_data_obj;
- uint64_t ctx_data_gpu_addr;
- uint32_t *ctx_data_cpu_addr;
-
- struct amdgpu_bo *mqd_data_obj;
- uint64_t mqd_data_gpu_addr;
- uint32_t *mqd_data_cpu_addr;
-
- struct amdgpu_bo *ring_data_obj;
- uint64_t ring_data_gpu_addr;
- uint32_t *ring_data_cpu_addr;
-
-
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- uint32_t pasid;
- uint32_t vm_cntx_cntl;
- uint32_t num_queues;
-};
-
-static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
- uint64_t addr, uint32_t size)
-{
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
- if (!*bo_va) {
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r)
- goto error_del_bo_va;
-
-
- r = amdgpu_vm_bo_update(adev, *bo_va, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, vm->last_update);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
-
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, *bo_va);
- amdgpu_sync_free(&sync);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
- uint64_t addr)
-{
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
-
- r = amdgpu_vm_bo_unmap(adev, bo_va, addr);
- if (r)
- goto out_unlock;
-
- amdgpu_vm_bo_del(adev, bo_va);
-
-out_unlock:
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static void setup_vpe_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint64_t ring_gpu_addr = test->ring_data_gpu_addr;
-
- mqd->rb_base_lo = (ring_gpu_addr >> 8);
- mqd->rb_base_hi = (ring_gpu_addr >> 40);
- mqd->rb_size = PAGE_SIZE / 4;
- mqd->wptr_val = 0;
- mqd->rptr_val = 0;
- mqd->unmapped = 1;
-
- if (adev->vpe.collaborate_mode)
- memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
-
- qinfo->mqd_addr = test->mqd_data_gpu_addr;
- qinfo->csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = 0;
- qinfo->doorbell_offset_1 = 0;
-}
-
-static void setup_vcn_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
-}
-
-static int add_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_add_queue_input queue_input = {};
- int r;
-
- queue_input.process_id = test->pasid;
- queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo);
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
-
- queue_input.process_quantum = 100000; /* 10ms */
- queue_input.process_csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, process_csa);
-
- queue_input.context_quantum = 10000; /* 1ms */
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
-
- queue_input.engine_type = qinfo->engine;
- queue_input.mqd_addr = qinfo->mqd_addr;
- queue_input.vm_context_cntl = test->vm_cntx_cntl;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int remove_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_remove_queue_input queue_input = {};
- int r;
-
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint32_t *ring = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4;
- uint32_t *ib = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4;
- uint64_t ib_gpu_addr = test->ring_data_gpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib);
- uint32_t *fence = ib + 2048 / 4;
- uint64_t fence_gpu_addr = ib_gpu_addr + 2048;
- const uint32_t test_pattern = 0xdeadbeef;
- int i;
-
- ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
- ib[1] = lower_32_bits(fence_gpu_addr);
- ib[2] = upper_32_bits(fence_gpu_addr);
- ib[3] = test_pattern;
-
- ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0);
- ring[1] = (ib_gpu_addr & 0xffffffe0);
- ring[2] = upper_32_bits(ib_gpu_addr);
- ring[3] = 4;
- ring[4] = 0;
- ring[5] = 0;
-
- mqd->wptr_val = (6 << 2);
- if (adev->vpe.collaborate_mode)
- (++mqd)->wptr_val = (6 << 2);
-
- WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (*fence == test_pattern)
- return 0;
- udelay(1);
- }
-
- dev_err(adev->dev, "vpe queue submission timeout\n");
-
- return -ETIMEDOUT;
-}
-
-static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- return 0;
-}
-
-static int setup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
- int r;
-
- test->vm_cntx_cntl = hub->vm_cntx_cntl;
-
- test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL);
- if (!test->vm) {
- r = -ENOMEM;
- return r;
- }
-
- r = amdgpu_vm_init(adev, test->vm, -1);
- if (r)
- goto error_free_vm;
-
- r = amdgpu_pasid_alloc(16);
- if (r < 0)
- goto error_fini_vm;
- test->pasid = r;
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ctx_data_obj,
- &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- if (r)
- goto error_free_pasid;
-
- memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data));
-
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->mqd_data_obj,
- &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- if (r)
- goto error_free_ctx_data_obj;
-
- memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE);
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ring_data_obj,
- NULL,
- (void **)&test->ring_data_cpu_addr);
- if (r)
- goto error_free_mqd_data_obj;
-
- memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
-
- test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
- test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
- if (r)
- goto error_free_ring_data_obj;
-
- return 0;
-
-error_free_ring_data_obj:
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
-error_free_mqd_data_obj:
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
-error_free_ctx_data_obj:
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
-error_free_pasid:
- amdgpu_pasid_free(test->pasid);
-error_fini_vm:
- amdgpu_vm_fini(adev, test->vm);
-error_free_vm:
- kfree(test->vm);
-
- return r;
-}
-
-static void cleanup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- unmap_ring_data(adev, test->vm, test->ring_data_obj,
- test->bo_va, test->ring_data_gpu_addr);
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- amdgpu_pasid_free(test->pasid);
- amdgpu_vm_fini(adev, test->vm);
- kfree(test->vm);
-}
-
-static int setup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- setup_vpe_queue(adev, test, &qinfo[i]);
- else
- setup_vcn_queue(adev, test, &qinfo[i]);
-
- r = add_test_queue(adev, test, &qinfo[i]);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int submit_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- r = submit_vpe_queue(adev, test);
- else
- r = submit_vcn_queue(adev, test);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void cleanup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i;
-
- for (i = 0; i < test->num_queues; i++)
- remove_test_queue(adev, test, &qinfo[i]);
-}
-
-static int umsch_mm_test(struct amdgpu_device *adev)
-{
- struct umsch_mm_test_queue_info qinfo[] = {
- { .engine = UMSCH_SWIP_ENGINE_TYPE_VPE },
- };
- struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) };
- int r;
-
- r = setup_umsch_mm_test(adev, &test);
- if (r)
- return r;
-
- r = setup_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- r = submit_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
-
- return 0;
-
-cleanup:
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
- return r;
-}
+MODULE_FIRMWARE("amdgpu/umsch_mm_4_0_0.bin");
int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
{
@@ -581,14 +126,14 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
case IP_VERSION(4, 0, 6):
- fw_name = "amdgpu/umsch_mm_4_0_0.bin";
+ fw_name = "4_0_0";
break;
default:
- break;
+ return -EINVAL;
}
r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
- "%s", fw_name);
+ "amdgpu/umsch_mm_%s.bin", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -792,7 +337,7 @@ static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
- return umsch_mm_test(adev);
+ return 0;
}
static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..9a969175900e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,1482 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
+
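amdgpu_userq_get_supported_ip_mask() above builds a bitmask with one bit per hardware IP type that has user-queue functions registered. A hedged user-space sketch of building and testing such a mask; the IP indices and the has_funcs[] array are assumptions for illustration, not the driver's data.

#include <stdbool.h>
#include <stdio.h>

enum { HW_IP_GFX, HW_IP_COMPUTE, HW_IP_DMA, HW_IP_NUM };	/* illustrative IP ids */

int main(void)
{
	/* pretend only GFX and DMA have user-queue functions registered */
	const bool has_funcs[HW_IP_NUM] = { true, false, true };
	unsigned int mask = 0;
	int i;

	for (i = 0; i < HW_IP_NUM; i++)
		if (has_funcs[i])
			mask |= 1u << i;

	printf("mask = 0x%x\n", mask);					/* 0x5 */
	printf("compute bit = %d\n", !!(mask & (1u << HW_IP_COMPUTE)));	/* 0 */
	return 0;
}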
+static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
+ enum amdgpu_ring_type ring_type, int reset_type)
+{
+ if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
+ return false;
+
+ switch (ring_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_device_should_recover_gpu(adev)) {
+ amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->userq_reset_work);
+ /* Wait for the reset job to complete */
+ flush_work(&adev->userq_reset_work);
+ }
+}
+
+static int
+amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const int queue_types[] = {
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_SDMA
+ };
+ const int num_queue_types = ARRAY_SIZE(queue_types);
+ bool gpu_reset = false;
+ int r = 0;
+ int i;
+
+ /* Warning if current process mutex is not held */
+	/* Warn if the current process mutex is not held */
+
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "userq reset disabled by debug mask\n");
+ return 0;
+ }
+
+ /*
+ * If GPU recovery feature is disabled system-wide,
+ * skip all reset detection logic
+ */
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
+ /*
+ * Iterate through all queue types to detect and reset problematic queues
+ * Process each queue type in the defined order
+ */
+ for (i = 0; i < num_queue_types; i++) {
+ int ring_type = queue_types[i];
+ const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
+
+ if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
+ continue;
+
+ if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
+ funcs && funcs->detect_and_reset) {
+ r = funcs->detect_and_reset(adev, ring_type);
+ if (r) {
+ gpu_reset = true;
+ break;
+ }
+ }
+ }
+
+ if (gpu_reset)
+ amdgpu_userq_gpu_reset(adev);
+
+ return r;
+}
+
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+ struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+ struct amdgpu_userq_va_cursor *va_cursor;
+
+ va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+ if (!va_cursor)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&va_cursor->list);
+ va_cursor->gpu_addr = addr;
+ atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+ list_add(&va_cursor->list, &queue->userq_va_list);
+
+ return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ struct amdgpu_vm *vm = queue->vm;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+	/* Only validate that the userq resides within the VA mapping range */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
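amdgpu_userq_input_va_validate() above accepts a queue buffer only when the whole [user_addr, user_addr + size) range, counted in GPU pages, fits inside the mapping returned by the VA lookup: user_addr >= start and last - user_addr + 1 >= size. A small worked sketch of that containment check with made-up page numbers; the extra addr <= last test is only needed here because the sketch has no mapping lookup to guarantee it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative mapping covering pages [0x100, 0x1ff] inclusive. */
struct mapping { uint64_t start, last; };

static bool range_fits(const struct mapping *m, uint64_t addr, uint64_t size)
{
	return addr >= m->start && addr <= m->last &&
	       m->last - addr + 1 >= size;
}

int main(void)
{
	struct mapping m = { 0x100, 0x1ff };

	printf("%d\n", range_fits(&m, 0x180, 0x40));	/* 1: 0x180..0x1bf fits    */
	printf("%d\n", range_fits(&m, 0x1f0, 0x20));	/* 0: would run past 0x1ff */
	return 0;
}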
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ bool r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
+ r = true;
+ else
+ r = false;
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ return r;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ int r = 0;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+ dev_dbg(queue->userq_mgr->adev->dev,
+ "validate the userq mapping:%p va:%llx r:%d\n",
+ queue, va_cursor->gpu_addr, r);
+ }
+
+ if (r != 0)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_userq_va_cursor *va_cursor)
+{
+ atomic_set(&mapping->bo_va->userq_va_mapped, 0);
+ list_del(&va_cursor->list);
+ kfree(va_cursor);
+}
+
+static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
+ if (!mapping) {
+ r = -EINVAL;
+ goto err;
+ }
+ dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
+ queue, va_cursor->gpu_addr);
+ amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
+ }
+err:
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ return r;
+}
+
+static int
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ }
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret = 0;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ return -ETIME;
+ }
+ }
+
+ return ret;
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ /* Drop the userq reference. */
+ amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
+ list_del(&queue->userq_va_list);
+ kfree(queue);
+
+ up_read(&adev->reset_domain->sem);
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+ default:
+		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+ atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+	/* TODO: a reset is required on userq hw unmap error */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ char *queue_name;
+ bool skip_map_queue;
+ u32 qid;
+ uint64_t index;
+ int r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+ * There could be a situation that we are creating a new queue while
+ * the other queues under this UQ_mgr are suspended. So if there is any
+ * resume work pending, wait for it to get done.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ INIT_LIST_HEAD(&queue->userq_va_list);
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+	/* Validate the userq virtual addresses. */
+ if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
+
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ xa_erase(&uq_mgr->userq_mgr_xa, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
+
+ args->out.queue_id = qid;
+ atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ return r;
+}
+
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+		/* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+			drm_file_err(filp, "invalid userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+			drm_file_err(filp, "invalid userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ unsigned long queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+
+ if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+ drm_file_err(uq_mgr->file,
+				     "trying to restore queue without va mapping\n");
+ queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+ continue;
+ }
+
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
+static int
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
+
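amdgpu_userq_bo_validate() above drains vm->invalidated with the usual pattern of picking the first entry under the spinlock and processing it with the lock dropped, relying on amdgpu_vm_bo_update() to move the bo_va off the list. A simplified user-space sketch of that drain pattern; it uses a plain mutex and simply unlinks each entry while processing it, and all names and data are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *invalidated;	/* head of the pending list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void push(int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->id = id;
	pthread_mutex_lock(&lock);
	n->next = invalidated;
	invalidated = n;
	pthread_mutex_unlock(&lock);
}

/* Pop one entry under the lock, then do the heavy work unlocked. */
static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (invalidated) {
		struct node *n = invalidated;

		invalidated = n->next;
		pthread_mutex_unlock(&lock);

		printf("validating %d\n", n->id);	/* work done outside the lock */
		free(n);

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	push(1);
	push(2);
	push(3);
	drain();
	return 0;
}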
+/* Make sure the whole VM is ready to be used */
+static int
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ bool invalidated = false, new_addition = false;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_hmm_range *range;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned long key, tmp_key;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ struct drm_exec exec;
+ struct xarray xa;
+ int ret;
+
+ xa_init(&xa);
+
+retry_lock:
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+
+ if (invalidated) {
+ xa_for_each(&xa, tmp_key, range) {
+ bo = range->bo;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+ }
+ invalidated = false;
+ }
+
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
+
+ key = 0;
+ /* Validate User Ptr BOs */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+ continue;
+
+ range = xa_load(&xa, key);
+ if (range && range->bo != bo) {
+ xa_erase(&xa, key);
+ amdgpu_hmm_range_free(range);
+ range = NULL;
+ }
+
+ if (!range) {
+ range = amdgpu_hmm_range_alloc(bo);
+ if (!range) {
+ ret = -ENOMEM;
+ goto unlock_all;
+ }
+
+ xa_store(&xa, key, range, GFP_KERNEL);
+ new_addition = true;
+ }
+ key++;
+ }
+
+ if (new_addition) {
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (ret)
+ goto unlock_all;
+ }
+
+ invalidated = true;
+ new_addition = false;
+ goto retry_lock;
+ }
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
+
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Using the done list like that is now ok since everything is
+ * locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ amdgpu_hmm_range_free(range);
+ }
+ xa_destroy(&xa);
+ return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_vm_validate(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ unsigned long queue_id;
+ int ret = 0, r;
+
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+ /* Try to unmap all the queues in this process ctx */
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+void amdgpu_userq_reset_work(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ userq_reset_work);
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USERQ;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ unsigned long queue_id;
+ int ret;
+
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ int ret;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret)
+ dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret)
+ dev_err(adev->dev, "Failed to evict userqueue\n");
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
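+/*
+ * Per-process user queue manager setup. A minimal sketch of the expected
+ * call flow, with illustrative call sites that are not part of this file:
+ *
+ *	// on DRM file open
+ *	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ *
+ *	// on DRM file release
+ *	amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ */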
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ unsigned long queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&userq_mgr->userq_mutex);
+ amdgpu_userq_detect_and_reset_queues(userq_mgr);
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ xa_destroy(&userq_mgr->userq_mgr_xa);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
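+/*
+ * System suspend/resume handling: for S0ix the queues are only preempted
+ * and later restored, while for a full suspend they are unmapped and
+ * mapped again on resume.
+ */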
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
+
+ if (!ip_mask)
+ return 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ guard(mutex)(&uqm->userq_mutex);
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
+
+ if (!ip_mask)
+ return 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ mutex_lock(&uqm->userq_mutex);
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+
+ return ret;
+}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_bo_va *bo_va = mapping->bo_va;
+ struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+ int ret = 0;
+
+ if (!ip_mask)
+ return 0;
+
+ dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
+	/*
+	 * The userq VA mapping reservation should include the eviction fence;
+	 * if the eviction fence cannot be signaled successfully during
+	 * unmapping, the driver warns to flag this improper unmap of the
+	 * userq VA.
+	 * Note: the eviction fence may be attached to different BOs, and this
+	 * unmap only covers one kind of userq VA, so assume the eviction
+	 * fence is always unsignaled at this point.
+	 */
+ if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
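+/*
+ * GPU reset handling: amdgpu_userq_pre_reset() unmaps every mapped queue,
+ * marks it AMDGPU_USERQ_STATE_HUNG and force-completes its fences, while
+ * amdgpu_userq_post_reset() maps the hung queues again if VRAM was not
+ * lost across the reset.
+ */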
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
+{
+ const struct amdgpu_userq_funcs *userq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ amdgpu_userq_wait_for_last_fence(uqm, queue);
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ userq_funcs->unmap(uqm, queue);
+			/*
+			 * Just mark all queues as hung at this point.
+			 * If unmap succeeds, we could map them again
+			 * in amdgpu_userq_post_reset() if VRAM is not lost.
+			 */
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_fence_driver_force_completion(queue);
+ }
+ }
+}
+
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
+{
+	/*
+	 * Any queue marked AMDGPU_USERQ_STATE_HUNG in amdgpu_userq_pre_reset()
+	 * should be mappable again at this point, so remap it and continue
+	 * if VRAM is not lost.
+	 */
+ struct amdgpu_userq_mgr *uqm;
+ struct amdgpu_usermode_queue *queue;
+ const struct amdgpu_userq_funcs *userq_funcs;
+ unsigned long queue_id;
+ int r = 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Re-map queue */
+ r = userq_funcs->map(uqm, queue);
+ if (r) {
+ dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
+ continue;
+ }
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..c37444427a14
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+ AMDGPU_USERQ_STATE_INVALID_VA,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_userq_va_cursor {
+ u64 gpu_addr;
+ struct list_head list;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+ struct dentry *debugfs_queue;
+
+ struct list_head userq_va_list;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
+};
+
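+/*
+ * Each queue-capable IP block is expected to provide an amdgpu_userq_funcs
+ * vtable and install it in adev->userq_funcs[] (indexed by queue type).
+ * A minimal sketch, with illustrative callback names that are not defined
+ * by this header:
+ *
+ *	static const struct amdgpu_userq_funcs foo_userq_funcs = {
+ *		.mqd_create = foo_userq_mqd_create,
+ *		.mqd_destroy = foo_userq_mqd_destroy,
+ *		.map = foo_userq_map,
+ *		.unmap = foo_userq_unmap,
+ *	};
+ *
+ *	adev->userq_funcs[AMDGPU_HW_IP_GFX] = &foo_userq_funcs;
+ */
+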
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ /**
+ * @userq_mgr_xa: Per-process user queue map (queue ID → queue)
+ * Key: queue_id (unique ID within the process's userq manager)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_mgr_xa;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct drm_file *file;
+ atomic_t userq_count[AMDGPU_RING_TYPE_MAX];
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+void amdgpu_userq_reset_work(struct work_struct *work);
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..eba9fb359047
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ unsigned long flags;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
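+/*
+ * Signaling sketch: the completed fence value is written to the queue's
+ * seq64 slot and amdgpu_userq_fence_driver_process() is then expected to
+ * run (typically from the interrupt path) so that every pending fence with
+ * seqno <= that value gets signaled. In pseudo form:
+ *
+ *	*fence_drv->cpu_addr = cpu_to_le64(completed_seq);
+ *	amdgpu_userq_fence_driver_process(fence_drv);
+ *
+ * The trigger shown here is illustrative; the actual plumbing lives in the
+ * IP-specific code.
+ */
+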
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from the userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers that is considered
+ * signaled once the RPTR value written to seq64 memory reaches WPTR.
+ *
+ * Returns 0 on success with the wptr value stored in @wptr, error on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+		DRM_ERROR("Failed mapping the userqueue wptr bo\n");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+ }
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+	/* Getting here means the user queue is active; make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+	/* Attach the created fence to the syncobjs */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
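+/*
+ * Both the signal and wait ioctls lock their BO sets with the drm_exec
+ * helper. The locking pattern, reduced to its core (a sketch using only
+ * the drm_exec API, not code from this file):
+ *
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_bos);
+ *	drm_exec_until_all_locked(&exec) {
+ *		ret = drm_exec_prepare_array(&exec, objs, num_bos, 1);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (ret)
+ *			break;
+ *	}
+ *	// ... use the locked reservations ...
+ *	drm_exec_fini(&exec);
+ */
+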
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ size_mul(sizeof(u32), num_read_bo_handles));
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ size_mul(sizeof(u32), num_write_bo_handles));
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj));
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count GEM objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+		 * retrieve userq_fence_info. In that case we skip filling
+		 * userq_fence_info and return the actual number of fences in
+		 * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve GEM read objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve GEM write objects fence */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+		 * We need to make sure the user queue releases its references
+		 * to the fence drivers at some point before queue destruction.
+		 * Otherwise, we would keep gathering those references until we
+		 * run out of space and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store drm syncobj's gpu va address and value */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..d76add2afc77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+	 * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal macro generating the accessor helpers for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
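+/*
+ * Usage sketch; the "foo" capability list below is illustrative and not
+ * part of this header:
+ *
+ *	#define FOO_CAP_LIST(X)	X(FOO_CAP_A) X(FOO_CAP_B)
+ *	DECLARE_ATTR_CAP_CLASS(foo, FOO_CAP_LIST)
+ *
+ *	struct foo_caps caps;
+ *
+ *	foo_attr_init(&caps);
+ *	foo_attr_set(&caps, FOO_CAP_A, AMDGPU_CAP_ATTR_RW);
+ *	bool rw = foo_cap_is_rw(&caps, FOO_CAP_A);
+ */
+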
+#endif /* AMDGPU_UTILS_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 74758b5ffc6c..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b9060bcd4806..a7d8f1ce6ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,6 +41,9 @@
#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_SI
+#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
@@ -61,6 +64,9 @@
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
+#ifdef CONFIG_DRM_AMDGPU_SI
+MODULE_FIRMWARE(FIRMWARE_VCE_V1_0);
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_sw_init - allocate memory, load vce firmware
+ * amdgpu_vce_firmware_name() - determine the firmware file name for VCE
*
* @adev: amdgpu_device pointer
- * @size: size for the new BO
*
- * First step to get VCE online, allocate memory and load the firmware
+ * Each chip that has VCE IP may need a different firmware.
+ * This function returns the name of the VCE firmware file
+ * appropriate for the current chip.
*/
-int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev)
{
- const char *fw_name;
- const struct common_firmware_header *hdr;
- unsigned int ucode_version, version_major, version_minor, binary_id;
- int i, r;
-
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_PITCAIRN:
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ return FIRMWARE_VCE_V1_0;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
- fw_name = FIRMWARE_BONAIRE;
- break;
+ return FIRMWARE_BONAIRE;
case CHIP_KAVERI:
- fw_name = FIRMWARE_KAVERI;
- break;
+ return FIRMWARE_KAVERI;
case CHIP_KABINI:
- fw_name = FIRMWARE_KABINI;
- break;
+ return FIRMWARE_KABINI;
case CHIP_HAWAII:
- fw_name = FIRMWARE_HAWAII;
- break;
+ return FIRMWARE_HAWAII;
case CHIP_MULLINS:
- fw_name = FIRMWARE_MULLINS;
- break;
+ return FIRMWARE_MULLINS;
#endif
case CHIP_TONGA:
- fw_name = FIRMWARE_TONGA;
- break;
+ return FIRMWARE_TONGA;
case CHIP_CARRIZO:
- fw_name = FIRMWARE_CARRIZO;
- break;
+ return FIRMWARE_CARRIZO;
case CHIP_FIJI:
- fw_name = FIRMWARE_FIJI;
- break;
+ return FIRMWARE_FIJI;
case CHIP_STONEY:
- fw_name = FIRMWARE_STONEY;
- break;
+ return FIRMWARE_STONEY;
case CHIP_POLARIS10:
- fw_name = FIRMWARE_POLARIS10;
- break;
+ return FIRMWARE_POLARIS10;
case CHIP_POLARIS11:
- fw_name = FIRMWARE_POLARIS11;
- break;
+ return FIRMWARE_POLARIS11;
case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
- break;
+ return FIRMWARE_POLARIS12;
case CHIP_VEGAM:
- fw_name = FIRMWARE_VEGAM;
- break;
+ return FIRMWARE_VEGAM;
case CHIP_VEGA10:
- fw_name = FIRMWARE_VEGA10;
- break;
+ return FIRMWARE_VEGA10;
case CHIP_VEGA12:
- fw_name = FIRMWARE_VEGA12;
- break;
+ return FIRMWARE_VEGA12;
case CHIP_VEGA20:
- fw_name = FIRMWARE_VEGA20;
- break;
+ return FIRMWARE_VEGA20;
default:
- return -EINVAL;
+ return NULL;
}
+}
+
+/**
+ * amdgpu_vce_early_init() - try to load VCE firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tries to load the VCE firmware.
+ *
+ * When the firmware is not found, this returns -ENOENT so that the driver
+ * can still load and initialize the rest of the IP blocks.
+ * The GPU can function just fine without VCE; it will simply not support
+ * video encoding.
+ */
+int amdgpu_vce_early_init(struct amdgpu_device *adev)
+{
+ const char *fw_name = amdgpu_vce_firmware_name(adev);
+ const struct common_firmware_header *hdr;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
+ int r;
+
+ if (!fw_name)
+ return -ENOENT;
r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
- fw_name);
+ dev_err(adev->dev,
+ "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n",
+ fw_name, r);
+
amdgpu_ucode_release(&adev->vce.fw);
- return r;
+ return -ENOENT;
}
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
@@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
+ dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
+ return 0;
+}
+
+/**
+ * amdgpu_vce_sw_init() - allocate memory for VCE BO
+ *
+ * @adev: amdgpu_device pointer
+ * @size: size for the new BO
+ *
+ * First step to get VCE online: allocate memory for VCE BO.
+ * The VCE firmware binary is copied into the VCE BO later,
+ * in amdgpu_vce_resume. The VCE executes its code from the
+ * VCE BO and also uses the space in this BO for its stack and data.
+ *
+ * Ideally this BO should be placed in VRAM for optimal performance,
+ * although technically it also runs from system RAM (albeit slowly).
+ */
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+{
+ int i, r;
+
+ if (!adev->vce.fw)
+ return -ENOENT;
+
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
*/
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
- void *cpu_addr;
const struct common_firmware_header *hdr;
unsigned int offset;
- int r, idx;
+ int idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
- r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) VCE map failed\n", r);
- return r;
- }
-
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo));
+ memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
}
- amdgpu_bo_kunmap(adev->vce.vcpu_bo);
-
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
-
return 0;
}
@@ -427,6 +451,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
/**
+ * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns how many GART pages we need before GTT for the VCE IP block.
+ * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
+ * For VCE2+, this is not needed so return zero.
+ */
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
+{
+ /* VCE IP block not added yet, so can't use amdgpu_ip_version */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return 512;
+
+ return 0;
+}
+
+/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
* @ring: ring we should submit the msg to
@@ -449,7 +491,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -540,7 +582,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 6e53f872d084..1c3464ce5037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -51,14 +51,17 @@ struct amdgpu_vce {
struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
+ uint32_t keyselect;
};
+int amdgpu_vce_early_init(struct amdgpu_device *adev);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
struct amdgpu_ib *ib);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 83faf6e6788a..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,48 +92,56 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
char ucode_prefix[25];
- int r, i;
+ int r;
+ adev->vcn.inst[i].adev = adev;
+ adev->vcn.inst[i].inst = i;
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_%d.bin", ucode_prefix, i);
- else
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+
+ if (i != 0 && adev->vcn.per_inst_fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ if (!adev->vcn.inst[0].fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
- if (r) {
- amdgpu_ucode_release(&adev->vcn.inst[i].fw);
- return r;
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ } else {
+ r = 0;
}
+ adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
}
+
return r;
}
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
unsigned long bo_size;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
- int i, r;
-
- INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
- mutex_init(&adev->vcn.vcn_pg_lock);
- mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
- atomic_set(&adev->vcn.total_submission_cnt, 0);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
- atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+ int r;
+ mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+ mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
+ atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
+ INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
+ adev->vcn.inst[i].indirect_sram = true;
/*
* Some Steam Deck's BIOS versions are incompatible with the
@@ -146,18 +154,19 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
- !strncmp("F7A0114", bios_ver, 7))) {
- adev->vcn.indirect_sram = false;
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.inst[i].indirect_sram = false;
dev_info(adev->dev,
- "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}
/* from vcn4 and above, only unified queue is used */
- adev->vcn.using_unified_queue =
+ adev->vcn.inst[i].using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -175,16 +184,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ dev_info(adev->dev,
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -207,82 +217,81 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (amdgpu_vcnfw_log)
bo_size += AMDGPU_VCNFW_LOG_SIZE;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ return r;
+ }
+
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
+
+ if (adev->vcn.inst[i].indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr,
- &adev->vcn.inst[i].cpu_addr);
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
-
- adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
- bo_size - fw_shared_size;
- adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
- bo_size - fw_shared_size;
-
- adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
-
- if (amdgpu_vcnfw_log) {
- adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.log_offset = log_offset;
- }
-
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr,
- &adev->vcn.inst[i].dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
- return r;
- }
- }
}
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
- int i, j;
+ int j;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- amdgpu_bo_free_kernel(
- &adev->vcn.inst[j].dpg_sram_bo,
- &adev->vcn.inst[j].dpg_sram_gpu_addr,
- (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
- kvfree(adev->vcn.inst[j].saved_bo);
+ kvfree(adev->vcn.inst[i].saved_bo);
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
- &adev->vcn.inst[j].gpu_addr,
- (void **)&adev->vcn.inst[j].cpu_addr);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ (void **)&adev->vcn.inst[i].cpu_addr);
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
- amdgpu_ucode_release(&adev->vcn.inst[j].fw);
+ if (adev->vcn.per_inst_fw) {
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ adev->vcn.inst[i].fw = NULL;
}
- mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
- mutex_destroy(&adev->vcn.vcn_pg_lock);
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
- return 0;
+ mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@@ -300,179 +309,230 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return 0;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.inst[i].saved_bo)
- return -ENOMEM;
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
- drm_dev_exit(idx);
- }
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
}
return 0;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
bool in_ras_intr = amdgpu_ras_intr_triggered();
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- /* err_event_athub will corrupt VCPU buffer, so we need to
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
return 0;
- return amdgpu_vcn_save_vcpu_bo(adev);
+ return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}
-int amdgpu_vcn_resume(struct amdgpu_device *adev)
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return -EINVAL;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
- if (adev->vcn.inst[i].saved_bo != NULL) {
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.inst[i].fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
- kvfree(adev->vcn.inst[i].saved_bo);
- adev->vcn.inst[i].saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned int offset;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.inst[i].fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- drm_dev_exit(idx);
- }
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- }
- memset_io(ptr, 0, size);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
+ memset_io(ptr, 0, size);
}
+
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
- unsigned int i, j;
- int r = 0;
+ unsigned int i = vcn_inst->inst, j;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
- struct dpg_pause_state new_state;
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
- if (fence[j] ||
- unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.inst[i].using_unified_queue) {
+ struct dpg_pause_state new_state;
- adev->vcn.pause_dpg_mode(adev, j, &new_state);
- }
+ if (fence[i] ||
+ unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
- fences += fence[j];
+ adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
}
- if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
+ fences += fence[i];
+
+ if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
}
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int r = 0;
+ struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- atomic_inc(&adev->vcn.total_submission_cnt);
+ atomic_inc(&vcn_inst->total_submission_cnt);
- if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- }
+ cancel_delayed_work_sync(&vcn_inst->idle_work);
- mutex_lock(&adev->vcn.vcn_pg_lock);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
+ !vcn_inst->using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
- atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+ atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
} else {
unsigned int fences = 0;
unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+ for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
- if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
}
- adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
+ vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
- mutex_unlock(&adev->vcn.vcn_pg_lock);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -482,12 +542,13 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
- !adev->vcn.using_unified_queue)
+ !adev->vcn.inst[ring->me].using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
- atomic_dec(&ring->adev->vcn.total_submission_cnt);
+ atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
+ VCN_IDLE_TIMEOUT);
}
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
@@ -505,7 +566,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -565,19 +626,19 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
ib = &job->ibs[0];
- ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
+ ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
ib->ptr[1] = addr;
- ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
+ ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
ib->ptr[3] = addr >> 32;
- ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
+ ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
ib->ptr[5] = 0;
for (i = 6; i < 16; i += 2) {
- ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
+ ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
ib->ptr[i+1] = 0;
}
ib->length_dw = 16;
@@ -740,12 +801,12 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
uint32_t ib_pack_in_dw;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -753,7 +814,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
- if (adev->vcn.using_unified_queue) {
+ if (adev->vcn.inst[ring->me].using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -772,7 +833,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -870,12 +931,12 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -884,7 +945,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -906,7 +967,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -937,12 +998,12 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -951,7 +1012,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -973,7 +1034,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -1058,36 +1119,32 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
}
}
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
- int i;
unsigned int idx;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- /* currently only support 2 FW instances */
- if (i >= 2) {
- dev_info(adev->dev, "More then 2 VCN FW instances!\n");
- break;
- }
- idx = AMDGPU_UCODE_ID_VCN + i;
- adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3) ||
- amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(5, 0, 1))
- break;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
+ && (i > 0))
+ return;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+		dev_info(adev->dev, "More than 2 VCN FW instances!\n");
+ return;
}
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
}
@@ -1100,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1113,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1180,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1390,10 +1447,191 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
struct dentry *root = minor->debugfs_root;
char name[32];
- if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
return;
sprintf(name, "amdgpu_vcn_sched_mask");
debugfs_create_file(name, 0600, root, adev,
&amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}
+
+/**
+ * vcn_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ ret |= vinst->set_pg_state(vinst, state);
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index adaf4388ad28..82624b44e661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
@@ -101,7 +100,8 @@
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -162,7 +162,8 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -238,6 +239,14 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_vcn_caps {
+ AMDGPU_VCN_RRMT_ENABLED,
+};
+
+#define AMDGPU_VCN_CAPS(caps) BIT(AMDGPU_VCN_##caps)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -289,6 +298,8 @@ struct amdgpu_vcn_fw_shared {
};
struct amdgpu_vcn_inst {
+ struct amdgpu_device *adev;
+ int inst;
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
@@ -310,6 +321,22 @@ struct amdgpu_vcn_inst {
const struct firmware *fw; /* VCN firmware */
uint8_t vcn_config;
uint32_t vcn_codec_disable_mask;
+ atomic_t total_submission_cnt;
+ struct mutex vcn_pg_lock;
+ enum amd_powergating_state cur_state;
+ struct delayed_work idle_work;
+ unsigned fw_version;
+ unsigned num_enc_rings;
+ bool indirect_sram;
+ struct amdgpu_vcn_reg internal;
+ struct mutex vcn1_jpeg1_workaround;
+ int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+ int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
+ bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -317,34 +344,30 @@ struct amdgpu_vcn_ras {
};
struct amdgpu_vcn {
- unsigned fw_version;
- struct delayed_work idle_work;
- unsigned num_enc_rings;
- enum amd_powergating_state cur_state;
- bool indirect_sram;
-
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
- struct mutex vcn_pg_lock;
- struct mutex vcn1_jpeg1_workaround;
- atomic_t total_submission_cnt;
unsigned harvest_config;
- int (*pause_dpg_mode)(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
struct ras_common_if *ras_if;
struct amdgpu_vcn_ras *ras;
uint16_t inst_mask;
uint8_t num_inst_per_aid;
- bool using_unified_queue;
/* IP reg dump */
uint32_t *ip_dump;
uint32_t supported_reset;
+ uint32_t caps;
+
+ bool per_inst_fw;
+ unsigned fw_version;
+
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -480,7 +503,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -493,11 +516,11 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
-int amdgpu_vcn_early_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_suspend(struct amdgpu_device *adev);
-int amdgpu_vcn_resume(struct amdgpu_device *adev);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
@@ -515,7 +538,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
@@ -535,4 +558,16 @@ int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0af469ec6fcc..47a6ce4fdc74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -44,6 +44,18 @@
vf2pf_info->ucode_info[ucode].version = ver; \
} while (0)
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+
+const char *amdgpu_virt_dynamic_crit_table_name[] = {
+ "IP DISCOVERY",
+ "VBIOS IMG",
+ "RAS TELEMETRY",
+ "DATA EXCHANGE",
+ "BAD PAGE INFO",
+ "INIT HEADER",
+ "LAST",
+};
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
virt->ops->req_init_data(adev);
if (adev->virt.req_init_data_ver > 0)
- DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n",
+ adev->virt.req_init_data_ver);
else
- DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+ dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
}
/**
@@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
- DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
@@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
&bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_dbg(adev->dev,
+ "RAS WARN: reserve vram for retired page %llx fail\n",
+ bp);
data->bps_bo[i] = bo;
}
data->last_reserved = i + 1;
@@ -598,8 +613,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage =
- ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
vf2pf_info->fb_vis_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
@@ -614,10 +629,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->decode_usage = 0;
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
- vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
-
- if (adev->mes.resource_1) {
- vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ vf2pf_info->mes_info_addr =
+ (uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
+ vf2pf_info->mes_info_size =
+ adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
}
vf2pf_info->checksum =
amd_sriov_msg_checksum(
@@ -657,10 +673,34 @@ out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
+static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
+{
+ uint32_t dataexchange_offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
+ uint32_t dataexchange_size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
+ uint64_t pos = 0;
+
+ dev_info(adev->dev,
+ "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ dataexchange_offset, dataexchange_size);
+
+ if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
+		dev_err(adev->dev, "Data exchange region is not 4-byte aligned\n");
+ return -EINVAL;
+ }
+
+ pos = (uint64_t)dataexchange_offset;
+ amdgpu_device_vram_access(adev, pos, pfvf_data,
+ dataexchange_size, false);
+
+ return 0;
+}
+
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
+ dev_info(adev->dev, "clean up the vf2pf work item\n");
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
adev->virt.vf2pf_update_interval_ms = 0;
}
@@ -668,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
+ uint32_t *pfvf_data = NULL;
+
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
- DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
@@ -683,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ pfvf_data =
+ kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
+ GFP_KERNEL);
+ if (!pfvf_data) {
+ dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
+ return;
+ }
- amdgpu_virt_read_pf2vf_data(adev);
+ if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
+ goto free_pfvf_data;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+free_pfvf_data:
+ kfree(pfvf_data);
+ pfvf_data = NULL;
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
}
}
@@ -700,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
if (adev->mman.fw_vram_usage_va) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
+ (AMD_SRIOV_MSG_SIZE_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ }
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -739,7 +819,7 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -775,8 +855,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ return reg;
+}
+
+static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
+{
+ bool is_sriov = false;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
+ is_sriov = true;
+
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
@@ -805,10 +894,252 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
amdgpu_virt_request_init_data(adev);
break;
default: /* other chip doesn't support SRIOV */
- DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ is_sriov = false;
+ dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
+
+ return is_sriov;
+}
+
+static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
+{
+ ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
+
+ ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+ mutex_init(&adev->virt.access_req_mutex);
+
+ adev->virt.ras.cper_rptr = 0;
+}
+
+static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
+{
+ uint32_t sum = 0;
+
+ if (buf_start >= buf_end)
+ return 0;
+
+ for (; buf_start < buf_end; buf_start++)
+ sum += buf_start[0];
+
+ return 0xffffffff - sum;
+}
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
+ u64 init_hdr_offset = adev->virt.init_data_header.offset;
+ u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
+ u64 vram_size;
+ u64 end;
+ int r = 0;
+ uint8_t checksum = 0;
+
+ /* Skip below init if critical region version != v2 */
+ if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
+ return 0;
+
+ if (init_hdr_offset < 0) {
+ dev_err(adev->dev, "Invalid init header offset\n");
+ return -EINVAL;
+ }
+
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ return -EINVAL;
+ vram_size <<= 20;
+
+ if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
+ dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
+ return -EINVAL;
+ }
+
+ /* Allocate for init_data_hdr */
+ init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
+ if (!init_data_hdr)
+ return -ENOMEM;
+
+ amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
+ sizeof(struct amd_sriov_msg_init_data_header), false);
+
+ /* Table validation */
+ if (strncmp(init_data_hdr->signature,
+ AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
+ AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
+ dev_err(adev->dev, "Invalid init data signature: %.4s\n",
+ init_data_hdr->signature);
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum = amdgpu_virt_crit_region_calc_checksum(
+ (uint8_t *)&init_data_hdr->initdata_offset,
+ (uint8_t *)init_data_hdr +
+ sizeof(struct amd_sriov_msg_init_data_header));
+ if (checksum != init_data_hdr->checksum) {
+		dev_err(adev->dev, "Checksum mismatch: calculated 0x%x, init_data reports 0x%x\n",
+			checksum, init_data_hdr->checksum);
+ r = -EINVAL;
+ goto out;
+ }
+
+ memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
+ memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
+
+ adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
+ adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
+
+ /* Validation and initialization for each table entry */
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
+ if (!init_data_hdr->ip_discovery_size_in_kb ||
+ init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
+ init_data_hdr->ip_discovery_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
+ init_data_hdr->ip_discovery_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
+ init_data_hdr->ip_discovery_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
+ if (!init_data_hdr->vbios_img_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
+ init_data_hdr->vbios_img_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
+ init_data_hdr->vbios_img_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
+ init_data_hdr->vbios_img_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
+ if (!init_data_hdr->ras_tele_info_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
+ init_data_hdr->ras_tele_info_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
+ init_data_hdr->ras_tele_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
+ init_data_hdr->ras_tele_info_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
+ if (!init_data_hdr->dataexchange_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
+ init_data_hdr->dataexchange_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
+ init_data_hdr->dataexchange_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
+ init_data_hdr->dataexchange_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
+ if (!init_data_hdr->bad_page_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
+ init_data_hdr->bad_page_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
+ init_data_hdr->bad_page_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
+ init_data_hdr->bad_page_size_in_kb;
+ }
+
+ /* Validation for critical region info */
+ if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* reserved memory starts from crit region base offset with the size of 5MB */
+ adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
+ adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+ dev_info(adev->dev,
+		 "critical region v%d requests reserving memory starting at %08llx with %llu KB.\n",
+ init_data_hdr->version,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size >> 10);
+
+ adev->virt.is_dynamic_crit_regn_enabled = true;
+
+out:
+ kfree(init_data_hdr);
+ init_data_hdr = NULL;
+
+ return r;
+}
+
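+/*
+ * amdgpu_virt_get_dynamic_data_info() - copy a dynamic critical region table
+ * @adev: amdgpu device pointer
+ * @data_id: table id within the critical region
+ * @binary: destination buffer
+ * @size: in: capacity of @binary in bytes, out: number of bytes copied
+ *
+ * Read the requested table from VRAM into @binary.
+ */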
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size)
+{
+ uint32_t data_offset = 0;
+ uint32_t data_size = 0;
+ enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+ if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+ return -EINVAL;
+
+ data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+ data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+	/* Validate input params */
+ if (!binary || !size || *size < (uint64_t)data_size)
+ return -EINVAL;
+
+ /* Proceed to copy the dynamic content */
+ amdgpu_device_vram_access(adev,
+ (uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+ *size = (uint64_t)data_size;
+
+ dev_dbg(adev->dev,
+ "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+ return 0;
+}
+
+void amdgpu_virt_init(struct amdgpu_device *adev)
+{
+ bool is_sriov = false;
+ uint32_t reg = amdgpu_virt_init_detect_asic(adev);
+
+ is_sriov = amdgpu_virt_init_req_data(adev, reg);
+
+ if (is_sriov)
+ amdgpu_virt_init_ras(adev);
}
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
@@ -1017,6 +1348,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
void *scratch_reg2;
void *scratch_reg3;
void *spare_int;
+ unsigned long flags;
if (!adev->gfx.rlc.rlcg_reg_access_supported) {
dev_err(adev->dev,
@@ -1038,7 +1370,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
- mutex_lock(&adev->virt.rlcg_reg_lock);
+ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
if (reg_access_ctrl->spare_int)
spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
@@ -1097,7 +1429,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
ret = readl(scratch_reg0);
- mutex_unlock(&adev->virt.rlcg_reg_lock);
+ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
return ret;
}
@@ -1246,7 +1578,8 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block);
+ dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
+ block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
}
@@ -1260,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
@@ -1282,14 +1615,19 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
{
struct amdgpu_virt *virt = &adev->virt;
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
/* Host allows 15 ras telemetry requests per 60 seconds. Afterwhich, the Host
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
*/
- if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) {
+ if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
if (!virt->ops->req_ras_err_count(adev))
amdgpu_virt_cache_host_error_counts(adev,
- adev->virt.fw_reserve.ras_telemetry);
+ virt->fw_reserve.ras_telemetry);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
}
return 0;
@@ -1320,6 +1658,103 @@ int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_bl
return 0;
}
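+/*
+ * Validate the checksum of the host telemetry buffer and copy each CPER
+ * record from the host dump into the guest CPER ring, updating the cached
+ * read pointer. @more is set when the host still has records pending.
+ */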
+static int
+amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ u32 *more)
+{
+ struct amd_sriov_ras_cper_dump *cper_dump = NULL;
+ struct cper_hdr *entry = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t checksum, used_size, i;
+ int ret = 0;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
+ return -EINVAL;
+
+ cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
+ if (!cper_dump)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *more = cper_dump->more;
+
+ if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
+ dev_warn(
+ adev->dev,
+ "guest specified rptr that was too high! guest rptr: 0x%llx, host rptr: 0x%llx\n",
+ adev->virt.ras.cper_rptr, cper_dump->wptr);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+ goto out;
+ }
+
+ entry = (struct cper_hdr *)&cper_dump->buf[0];
+
+ for (i = 0; i < cper_dump->count; i++) {
+ amdgpu_cper_ring_write(ring, entry, entry->record_length);
+ entry = (struct cper_hdr *)((char *)entry +
+ entry->record_length);
+ }
+
+ if (cper_dump->overflow_count)
+ dev_warn(adev->dev,
+ "host reported CPER overflow of 0x%llx entries!\n",
+ cper_dump->overflow_count);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+out:
+ kfree(cper_dump);
+
+ return ret;
+}
+
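+/*
+ * Keep requesting CPER dumps from the host, starting at the cached read
+ * pointer, until the host reports no more records or an error occurs.
+ */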
+static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+ uint32_t more = 0;
+
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
+ return -EOPNOTSUPP;
+
+ do {
+ if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
+ ret = amdgpu_virt_write_cpers_to_ring(
+ adev, virt->fw_reserve.ras_telemetry, &more);
+ else
+ ret = 0;
+ } while (more && !ret);
+
+ return ret;
+}
+
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
+ if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ return ret;
+}
+
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
{
unsigned long ue_count, ce_count;
@@ -1331,3 +1766,82 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
return 0;
}
+
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return false;
+
+ return true;
+}
+
+/*
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ * Send command to GPU hypervisor to write new bad pages into the shared PF2VF region
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
+
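+/*
+ * Parse the host telemetry response for a critical region check and report
+ * through @hit whether the queried address falls inside a critical region.
+ */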
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+	/* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host
+ * will ignore incoming guest messages. Ratelimit the guest messages to
+ * prevent guest self DOS.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 5381b8d596e6..01d5bca2dee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -54,6 +54,12 @@
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
+#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
+#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4
+
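+/* Check whether the init data header marks the table with the given id as valid */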
+#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -96,6 +102,9 @@ struct amdgpu_virt_ops {
enum amdgpu_ras_block block);
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
+ int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -140,15 +149,21 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
+ AMDGIM_FEATURE_RAS_CPER = (1 << 11),
+ AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
};
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ /* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -242,6 +257,23 @@ struct amdgpu_virt_ras_err_handler_data {
int last_reserved;
};
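+/*
+ * Guest-side RAS telemetry state: ratelimits and a mutex serializing requests
+ * to the host, plus the cached CPER read pointer.
+ */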
+struct amdgpu_virt_ras {
+ struct ratelimit_state ras_error_cnt_rs;
+ struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
+ struct mutex ras_telemetry_mutex;
+ uint64_t cper_rptr;
+};
+
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
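+/* Offset and size (in KB) of one table within the SR-IOV critical region */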
+struct amdgpu_virt_region {
+ uint32_t offset;
+ uint32_t size_kb;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -251,11 +283,16 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -264,6 +301,12 @@ struct amdgpu_virt {
bool ras_init_done;
uint32_t reg_access;
+ /* dynamic(v2) critical regions */
+ struct amdgpu_virt_region init_data_header;
+ struct amdgpu_virt_region crit_regn;
+ struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID];
+ bool is_dynamic_crit_regn_enabled;
+
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
@@ -279,13 +322,18 @@ struct amdgpu_virt {
/* the ucode id to signal the autoload */
uint32_t autoload_ucode_id;
- struct mutex rlcg_reg_lock;
+ /* Spinlock to protect access to the RLCG register interface */
+ spinlock_t rlcg_reg_lock;
+
+ struct mutex access_req_mutex;
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
-
- struct ratelimit_state ras_telemetry_rs;
+ struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
+
+	/* support the hibernate-and-resume-with-a-different-VF feature on XGMI-enabled systems */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -321,9 +369,17 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -339,6 +395,12 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+#define amdgpu_sriov_ras_cper_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+
+#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -352,6 +414,8 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_multi_vf_mode(adev) \
+ (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
#define amdgpu_sriov_is_debug(adev) \
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
@@ -362,6 +426,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
@@ -377,7 +445,11 @@ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
-void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+void amdgpu_virt_init(struct amdgpu_device *adev);
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
@@ -405,5 +477,10 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
struct ras_err_data *err_data);
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 03308261f894..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -188,8 +187,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
@@ -627,7 +619,7 @@ static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
-static bool amdgpu_vkms_is_idle(void *handle)
+static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5c07777d3239..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -128,43 +128,14 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
+ * Asserts that the VM root PD is locked.
*/
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
- int r;
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
@@ -181,6 +152,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
@@ -198,6 +170,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
@@ -213,6 +186,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
@@ -260,6 +234,7 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
@@ -279,6 +254,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
@@ -295,10 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -327,6 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
+ dma_resv_assert_held(bo->tbo.base.resv);
spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
@@ -485,6 +465,46 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+
+ bo = bo_va->base.bo;
+ if (bo) {
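+			/* Hold a reference so the BO stays alive while the status lock is dropped */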
+ amdgpu_bo_ref(bo);
+ spin_unlock(&vm->status_lock);
+
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ amdgpu_bo_unref(&bo);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ prev = prev->next;
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -616,18 +636,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
-
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
-
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
- amdgpu_vm_put_task_info(ti);
- }
-
- return -EINVAL;
- }
+ dma_resv_assert_held(bo->tbo.base.resv);
r = validate(param, bo);
if (r)
@@ -654,22 +663,31 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -754,6 +772,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
@@ -761,8 +780,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool gds_switch_needed = ring->funcs->emit_gds_switch &&
job->gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush;
- struct dma_fence *fence = NULL;
+ bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
+ struct dma_fence *fence = NULL;
unsigned int patch;
int r;
@@ -785,8 +805,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader && job->base.s_fence &&
+ &job->base.s_fence->scheduled == isolation->spearhead;
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
- !(job->enforce_isolation && !job->vmid))
+ !cleaner_shader_needed)
return 0;
amdgpu_ring_ib_begin(ring);
@@ -797,9 +822,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
+ if (cleaner_shader_needed)
ring->funcs->emit_cleaner_shader(ring);
if (vm_flush_needed) {
@@ -813,7 +836,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
@@ -821,10 +844,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
job->oa_size);
}
- if (vm_flush_needed || pasid_mapping_needed) {
- r = amdgpu_fence_emit(ring, &fence, NULL, 0);
+ if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
+ r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
+ fence = &job->hw_vm_fence->base;
+ /* get a ref for the job */
+ dma_fence_get(fence);
}
if (vm_flush_needed) {
@@ -843,6 +869,18 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
id->pasid_mapping = dma_fence_get(fence);
mutex_unlock(&id_mgr->lock);
}
+
+ /*
+ * Make sure that all other submissions wait for the cleaner shader to
+ * finish before we push them to the HW.
+ */
+ if (cleaner_shader_needed) {
+ trace_amdgpu_cleaner_shader(ring, fence);
+ mutex_lock(&adev->enforce_isolation_mutex);
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(fence);
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
dma_fence_put(fence);
amdgpu_ring_patch_cond_exec(ring, patch);
@@ -934,6 +972,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
LIST_HEAD(relocated);
int r, idx;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
@@ -949,7 +989,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
@@ -1028,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked && vm->is_compute_context) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
@@ -1118,7 +1159,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1254,7 +1296,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1311,13 +1353,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
* but in case of something, we filter the flags in first place
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1458,7 +1501,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1614,7 +1657,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -1737,7 +1780,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1797,7 +1840,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1856,7 +1899,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1912,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
+ int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1932,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
return -ENOENT;
}
+	/* It is unlikely that the userq mapping has not been idled by the time
+	 * user space issues the GEM unmap IOCTL, unless the unmap is forced
+	 * from user space.
+	 */
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
+ r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
+ if (unlikely(r == -EBUSY))
+ dev_warn_once(adev->dev,
+ "Attempt to unmap an active userq buffer\n");
+ }
+
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
mapping->bo_va = NULL;
@@ -2038,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (before->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2053,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (after->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2378,10 +2433,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2392,13 +2448,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
@@ -2430,7 +2484,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2490,11 +2545,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2509,6 +2564,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2516,7 +2572,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2534,8 +2590,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
- INIT_LIST_HEAD(&vm->pt_freed);
- INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
INIT_KFIFO(vm->faults);
r = amdgpu_vm_init_entities(adev, vm);
@@ -2549,8 +2603,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2592,7 +2646,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
+
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2600,6 +2663,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2643,8 +2711,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2674,20 +2742,6 @@ unreserve_bo:
return r;
}
-/**
- * amdgpu_vm_release_compute - release a compute vm
- * @adev: amdgpu_device pointer
- * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
- *
- * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
- * pasid from vm. Compute should stop use of vm after this call.
- */
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
-{
- amdgpu_vm_set_pasid(adev, vm, 0);
- vm->is_compute_context = false;
-}
-
static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
{
for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
@@ -2717,11 +2771,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- flush_work(&vm->pt_free_work);
-
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -2731,7 +2787,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2762,10 +2818,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2775,7 +2828,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -2790,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i;
-
/* Concurrent flushes are only possible starting with Vega10 and
* are broken on Navi10 and Navi14.
*/
@@ -2800,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
-
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2861,6 +2907,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2869,17 +2916,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
- break;
+ return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -2983,7 +3022,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -3017,6 +3056,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
@@ -3157,3 +3198,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index a3e128e373bc..15d757c016cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -309,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -350,12 +349,16 @@ struct amdgpu_vm {
/* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+	 * PDs, PTs or per VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -365,18 +368,28 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+	 * have their own dma_resv object and do not depend on the root PD. Their
+	 * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
- /* PT BOs scheduled to free and fill with zero if vm_resv is not hold */
- struct list_head pt_freed;
- struct work_struct pt_free_work;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not updated in the PTs
+ */
+ struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_bo_base root;
@@ -399,7 +412,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -440,10 +453,6 @@ struct amdgpu_vm_manager {
unsigned int first_kfd_vmid;
bool concurrent_flush;
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
uint64_t max_pfn;
uint32_t num_level;
uint32_t block_size;
@@ -487,16 +496,14 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -544,11 +551,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -673,4 +680,12 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
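+/* Iterate over the valid/invalid mapping lists of a bo_va */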
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negativ errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b0bf21682115..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
@@ -547,27 +549,6 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
amdgpu_bo_unref(&entry->bo);
}
-void amdgpu_vm_pt_free_work(struct work_struct *work)
-{
- struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm;
- LIST_HEAD(pt_freed);
-
- vm = container_of(work, struct amdgpu_vm, pt_free_work);
-
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->pt_freed, &pt_freed);
- spin_unlock(&vm->status_lock);
-
- /* flush_work in amdgpu_vm_fini ensure vm->root.bo is valid. */
- amdgpu_bo_reserve(vm->root.bo, true);
-
- list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
- amdgpu_vm_pt_free(entry);
-
- amdgpu_bo_unreserve(vm->root.bo);
-}
-
/**
* amdgpu_vm_pt_free_list - free PD/PT levels
*
@@ -580,19 +561,15 @@ void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
struct amdgpu_vm_update_params *params)
{
struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm = params->vm;
bool unlocked = params->unlocked;
if (list_empty(&params->tlb_flush_waitlist))
return;
- if (unlocked) {
- spin_lock(&vm->status_lock);
- list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
- spin_unlock(&vm->status_lock);
- schedule_work(&vm->pt_free_work);
- return;
- }
+	/*
+	 * An unlocked unmap only clears page table leaves, so warn if we are
+	 * asked to free page table entries in that case.
+	 */
+ WARN_ON(unlocked);
list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
amdgpu_vm_pt_free(entry);
@@ -900,7 +877,15 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = ((uint64_t)mask + 1) << shift;
+
+ if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
+		/*
+		 * The MMU notifier callback does an unlocked unmap of a huge page
+		 * whose leaf is a PDE entry; only clear one entry and then search
+		 * again for the next PDE or PTE leaf.
+		 */
+ entry_end = 1ULL << shift;
+ else
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negativ errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
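+/*
+ * Some VPE firmware requires the engine to be at DPM level 0 before it is
+ * powered down; check whether that applies to the current device/firmware.
+ */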
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
+
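+	/* On firmware that requires it, defer power gating until the engine is back at DPM level 0 */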
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
@@ -379,9 +405,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +462,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +474,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +899,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
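+/* Reset the VPE queue by power gating and then ungating the engine */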
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +988,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
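As a reading aid (not part of the patch), the reworked idle handler above only power-gates VPE when no fences are outstanding and, on firmware that still requires DPM level 0 at power-down, only once the requested DPM level reads back as 0. The sketch below models just that decision; the helper name vpe_idle_decision and the plain integer inputs are stand-ins for the driver's fence count, firmware check and register read.

/*
 * Minimal standalone sketch of the gating decision in vpe_idle_work_handler().
 * Inputs are plain values instead of the driver helpers used in the patch.
 */
#include <stdbool.h>
#include <stdio.h>

enum idle_action { IDLE_POWER_GATE, IDLE_RESCHEDULE };

static enum idle_action vpe_idle_decision(unsigned int fences,
					  bool need_dpm0, unsigned int dpm_level)
{
	if (fences)
		return IDLE_RESCHEDULE;		/* work still in flight */
	if (need_dpm0 && dpm_level != 0)
		return IDLE_RESCHEDULE;		/* wait for DPM0 before gating */
	return IDLE_POWER_GATE;
}

int main(void)
{
	/* affected firmware, DPM still at level 1: keep polling */
	printf("%d\n", vpe_idle_decision(0, true, 1));	/* prints 1 (reschedule) */
	/* no fences, DPM0 reached: safe to gate */
	printf("%d\n", vpe_idle_decision(0, true, 0));	/* prints 0 (power gate) */
	return 0;
}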
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7d26a962f811..9d934c07fa6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -24,11 +24,11 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
-#include "amdgpu_atomfirmware.h"
#include "atom.h"
#define AMDGPU_MAX_SG_SEGMENT_SIZE (2UL << 30)
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
!adev->gmc.vram_vendor)
return 0;
+ if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
+ return 0;
+
return attr->mode;
}
@@ -396,43 +399,33 @@ out:
return ret;
}
-static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
{
- DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
-}
-
-static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
- return false;
-}
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
-static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
- return true;
-}
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
-static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
-}
+out:
+ mutex_unlock(&mgr->lock);
-static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
- return -ENOSPC;
+ return ret;
}
/**
@@ -463,7 +456,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
- if (!lpfn)
+ if (!lpfn || lpfn > man->size)
lpfn = man->size;
fpfn = (u64)place->fpfn << PAGE_SHIFT;
@@ -567,7 +560,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
remaining_size -= size;
}
- mutex_unlock(&mgr->lock);
+
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
@@ -584,6 +580,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
(u64)vres->base.size,
&vres->blocks);
}
+ mutex_unlock(&mgr->lock);
vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
@@ -645,12 +642,15 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset the cleared drm buddy blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
* amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
*
* @man: TTM memory type manager
@@ -879,14 +896,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
-static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
- .alloc = amdgpu_dummy_vram_mgr_new,
- .free = amdgpu_dummy_vram_mgr_del,
- .intersects = amdgpu_dummy_vram_mgr_intersects,
- .compatible = amdgpu_dummy_vram_mgr_compatible,
- .debug = amdgpu_dummy_vram_mgr_debug
-};
-
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -908,24 +917,22 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
- if (!adev->gmc.is_app_apu) {
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
- } else {
- man->func = &amdgpu_dummy_vram_mgr_func;
- DRM_INFO("Setup dummy vram mgr\n");
- }
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
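For illustration only (not part of the patch): the new allocated_vres_list bookkeeping lets amdgpu_vram_mgr_query_address_block_info() map a VRAM address back to the allocation and the task that made it, using a simple [start, start + size) containment test. The sketch below models that lookup; the block_info array and find_block helper are hypothetical stand-ins for the manager's list walk.

/* Standalone model of the address-to-owner lookup added in amdgpu_vram_mgr.c. */
#include <stdint.h>
#include <stdio.h>

struct block_info {
	uint64_t start;
	uint64_t size;
	int pid;		/* owning task, recorded at allocation time */
};

static const struct block_info *find_block(const struct block_info *blocks,
					   int count, uint64_t address)
{
	for (int i = 0; i < count; i++) {
		if (blocks[i].start <= address &&
		    address < blocks[i].start + blocks[i].size)
			return &blocks[i];	/* kernel helper returns 0 here */
	}
	return NULL;				/* kernel helper returns -ENOENT */
}

int main(void)
{
	const struct block_info blocks[] = {
		{ 0x0000000, 0x200000, 1234 },
		{ 0x1000000, 0x400000, 5678 },
	};
	const struct block_info *hit = find_block(blocks, 2, 0x1100000);

	if (hit)
		printf("address owned by pid %d (block at 0x%llx)\n",
		       hit->pid, (unsigned long long)hit->start);
	return 0;
}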
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index b256cbc2bc27..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -66,7 +80,13 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
- to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index e209b5e101df..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -27,6 +27,9 @@
#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
+
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
@@ -117,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -155,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -189,7 +212,7 @@ static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
-
+ amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
mutex_unlock(&xcp_mgr->xcp_lock);
@@ -215,15 +238,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
-int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
- int mode;
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return xcp_mgr->mode;
+ return true;
- if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
@@ -263,9 +298,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
- return 0;
+ ret = 0;
+ goto out;
} else if (ret) {
- return ret;
+ goto out;
}
/* Redirect all IOCTLs to the primary device */
@@ -278,9 +314,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
p_ddev->vma_offset_manager = ddev->vma_offset_manager;
p_ddev->driver = &amdgpu_partition_driver;
adev->xcp_mgr->xcp[i].ddev = p_ddev;
+
+ dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
}
+ ret = 0;
+out:
+ amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
- return 0;
+ return ret;
}
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
@@ -288,6 +329,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
struct amdgpu_xcp_mgr *xcp_mgr;
+ int i;
if (!xcp_funcs || !xcp_funcs->get_ip_details)
return -EINVAL;
@@ -306,6 +348,8 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
adev->xcp_mgr = xcp_mgr;
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
return amdgpu_xcp_dev_alloc(adev);
}
@@ -382,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
@@ -427,12 +472,229 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
return;
sched = entity->entity.rq->sched;
- if (sched->ready) {
+ if (drm_sched_wqueue_ready(sched)) {
ring = to_amdgpu_ring(entity->entity.rq->sched);
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
}
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
struct amdgpu_xcp_res_details *xcp_res, char *buf) \
@@ -635,7 +897,7 @@ static const struct attribute *xcp_attrs[] = {
NULL,
};
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
@@ -703,16 +965,16 @@ err1:
kobject_put(&xcp_cfg->kobj);
}
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
- if (!adev->xcp_mgr)
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
- xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);
@@ -722,3 +984,124 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
kobject_put(&xcp_cfg->kobj);
}
+
+/*====================== xcp sysfs - data entries ======================*/
+
+#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
+
+static ssize_t xcp_metrics_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ ssize_t size;
+
+ xcp_mgr = xcp->xcp_mgr;
+ size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
+ if (size <= 0)
+ return size;
+
+ if (size > PAGE_SIZE)
+ return -ENOSPC;
+
+ return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
+}
+
+static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+
+ if (!xcp || !xcp->valid)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
+
+static struct attribute *amdgpu_xcp_attrs[] = {
+ &xcp_sysfs_metrics.attr,
+ NULL,
+};
+
+static const struct attribute_group amdgpu_xcp_attrs_group = {
+ .attrs = amdgpu_xcp_attrs,
+ .is_visible = amdgpu_xcp_attrs_is_visible
+};
+
+static const struct kobj_type xcp_sysfs_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
+{
+ struct amdgpu_xcp *xcp;
+
+ for (n--; n >= 0; n--) {
+ xcp = &xcp_mgr->xcp[n];
+ if (!xcp->ddev || !xcp->valid)
+ continue;
+ sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ kobject_put(&xcp->kobj);
+ }
+}
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i, r;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Create a sysfs node under each partition's drm device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ break;
+ r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
+ &xcp->ddev->dev->kobj, "xcp");
+ if (r)
+ goto out;
+
+ r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ if (r)
+ goto out;
+ }
+
+ return;
+out:
+ kobject_put(&xcp->kobj);
+}
+
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Refresh attribute visibility for each partition's sysfs group */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ continue;
+ sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ }
+
+ return;
+}
+
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+
+ amdgpu_xcp_cfg_sysfs_init(adev);
+
+ return;
+}
+
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+ amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
+}
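As a side note (not part of the patch), amdgpu_xcp_select_scheds() above balances clients across partitions: when a file descriptor has no fixed partition it picks the partition with the lowest reference count and then bumps that count. The sketch below models only that selection step; pick_least_loaded and the plain array are hypothetical stand-ins for the per-partition ref_cnt atomics.

/* Standalone model of the least-loaded partition selection. */
#include <stdio.h>

static int pick_least_loaded(const unsigned int *ref_cnt, int num_xcps)
{
	unsigned int least = ~0u;
	int sel = 0;

	for (int i = 0; i < num_xcps; i++) {
		if (ref_cnt[i] < least) {
			least = ref_cnt[i];
			sel = i;
		}
	}
	return sel;
}

int main(void)
{
	unsigned int ref_cnt[4] = { 3, 1, 4, 1 };
	int xcp = pick_least_loaded(ref_cnt, 4);

	ref_cnt[xcp]++;				/* mirrors atomic_inc(&xcp->ref_cnt) */
	printf("selected partition #%d\n", xcp);	/* partition #1 */
	return 0;
}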
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index b63f53242c57..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -39,6 +39,8 @@
#define AMDGPU_XCP_NO_PARTITION (~0)
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
struct amdgpu_fpriv;
enum AMDGPU_XCP_IP_BLOCK {
@@ -108,6 +110,9 @@ struct amdgpu_xcp {
struct drm_driver *driver;
struct drm_vma_offset_manager *vma_offset_manager;
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
@@ -142,10 +147,6 @@ struct amdgpu_xcp_mgr_funcs {
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
- int (*select_scheds)(struct amdgpu_device *adev,
- u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
- int (*update_partition_sched_list)(struct amdgpu_device *adev);
};
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -174,18 +175,17 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
-
-#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->select_scheds ? \
- (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
-#define amdgpu_xcp_update_partition_sched_list(adev) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
- (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 74b4349e345a..aad530c46a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -294,17 +294,53 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
+{
+ int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ if (link_num < ARRAY_SIZE(link_map_6_4_x))
+ return link_map_6_4_x[link_num];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
- const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
- const int xgmi_inst = 2;
- u32 link_inst;
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
u64 addr;
- link_inst = global_link_num % xgmi_inst;
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
+
+ i = global_link_num / n;
+
+ if (!(adev->aid_mask & BIT(i)))
+ return U32_MAX;
- addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
- adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
}
@@ -313,8 +349,12 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
u32 xgmi_state_reg_val;
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
break;
default:
@@ -818,28 +858,71 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
* num_hops[2:0] = number of hops
*/
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+ struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
uint8_t num_hops_mask = 0x7;
int i;
+ if (!adev->gmc.xgmi.supported)
+ return 0;
+
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_hops & num_hops_mask;
- return -EINVAL;
+
+ dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+
+ return 0;
}
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw)
{
- struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
- int i;
+ bool peer_mode = bw_mode == AMDGPU_XGMI_BW_MODE_PER_PEER;
+ int unit_scale = bw_unit == AMDGPU_XGMI_BW_UNIT_MBYTES ? 1000 : 1;
+ int num_lanes = adev->gmc.xgmi.max_width;
+ int speed = adev->gmc.xgmi.max_speed;
+ int num_links = !peer_mode ? 1 : -1;
- for (i = 0 ; i < top->num_nodes; ++i)
- if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_links;
- return -EINVAL;
+ if (!(min_bw && max_bw))
+ return -EINVAL;
+
+ *min_bw = 0;
+ *max_bw = 0;
+
+ if (!adev->gmc.xgmi.supported)
+ return -ENODATA;
+
+ if (peer_mode && !peer_adev)
+ return -EINVAL;
+
+ if (peer_mode) {
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0 ; i < top->num_nodes; ++i) {
+ if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
+ continue;
+
+ num_links = top->nodes[i].num_links;
+ break;
+ }
+ }
+
+ if (num_links == -1) {
+ dev_err(adev->dev, "Failed to get number of xgmi links for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+ } else if (num_links) {
+ int per_link_bw = (speed * num_lanes * unit_scale)/BITS_PER_BYTE;
+
+ *min_bw = per_link_bw;
+ *max_bw = num_links * per_link_bw;
+ }
+
+ return 0;
}
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
@@ -885,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
-static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
-{
- struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
- struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
-
- for (int i = 0; i < peer_info->num_nodes; i++) {
- if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
- for (int j = 0; j < top_info->num_nodes; j++) {
- if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
- peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
- peer_info->nodes[i].is_sharing_enabled =
- top_info->nodes[j].is_sharing_enabled;
- peer_info->nodes[i].num_links =
- top_info->nodes[j].num_links;
- return;
- }
- }
- }
- }
-}
-
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -992,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* To do: continue with some node failed or disable the whole hive*/
goto exit_unlock;
}
-
- /* fill the topology info for peers instead of getting from PSP */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
- }
} else {
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
@@ -1123,11 +1179,13 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban
if (ext_error_code != 0 && ext_error_code != 9)
count = 0ULL;
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
break;
case ACA_SMU_TYPE_CE:
count = ext_error_code == 6 ? count : 0ULL;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
break;
default:
return -EINVAL;
@@ -1162,6 +1220,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
&xgmi_v6_4_0_aca_info, NULL);
if (r)
@@ -1221,6 +1280,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1255,6 +1315,7 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_reset_ras_error_count(adev);
break;
default:
@@ -1280,7 +1341,9 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
IP_VERSION(6, 1, 0) ||
amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
- IP_VERSION(6, 4, 0)) {
+ IP_VERSION(6, 4, 0) ||
+ amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 4, 1)) {
pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
} else {
@@ -1388,6 +1451,7 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1484,6 +1548,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
break;
default:
@@ -1671,3 +1736,43 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
return r;
}
+
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (amdgpu_use_xgmi_p2p && adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ default:
+ break;
+ }
+}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
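For illustration only (not part of the patch): amdgpu_xgmi_get_bandwidth() above derives a per-link figure from the link speed in GT/s and the lane count, then scales the upper bound by the number of links to the peer. With 32 GT/s and 16 lanes that works out to 64 GB/s per link. The sketch below reproduces just that arithmetic; xgmi_bw_range and its arguments are stand-ins for the driver fields.

/* Standalone model of the min/max bandwidth computation. */
#include <stdio.h>

#define BITS_PER_BYTE 8

static void xgmi_bw_range(int speed_gtps, int num_lanes, int num_links,
			  int mbytes, unsigned int *min_bw, unsigned int *max_bw)
{
	int unit_scale = mbytes ? 1000 : 1;
	int per_link = (speed_gtps * num_lanes * unit_scale) / BITS_PER_BYTE;

	*min_bw = per_link;			/* a single xgmi link */
	*max_bw = num_links * per_link;		/* all links to the peer */
}

int main(void)
{
	unsigned int min_bw, max_bw;

	xgmi_bw_range(32, 16, 4, 0, &min_bw, &max_bw);
	printf("per-peer range: %u..%u GB/s\n", min_bw, max_bw);	/* 64..256 */
	return 0;
}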
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index d1282b4c6348..5f36aff17e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -23,7 +23,6 @@
#define __AMDGPU_XGMI_H__
#include <drm/task_barrier.h>
-#include "amdgpu_psp.h"
#include "amdgpu_ras.h"
struct amdgpu_hive_info {
@@ -55,29 +54,63 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-extern struct amdgpu_xgmi_ras xgmi_ras;
+/**
+ * Bandwidth range reporting comes in two modes.
+ *
+ * PER_LINK - range for any xgmi link
+ * PER_PEER - range from the max of a single xgmi link up to the max across all links to the given source peer
+ */
+enum amdgpu_xgmi_bw_mode {
+ AMDGPU_XGMI_BW_MODE_PER_LINK = 0,
+ AMDGPU_XGMI_BW_MODE_PER_PEER
+};
+
+enum amdgpu_xgmi_bw_unit {
+ AMDGPU_XGMI_BW_UNIT_GBYTES = 0,
+ AMDGPU_XGMI_BW_UNIT_MBYTES
+};
+
+struct amdgpu_xgmi_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+extern struct amdgpu_xgmi_ras xgmi_ras;
+
+struct amdgpu_xgmi {
+ /* from psp */
+ u64 node_id;
+ u64 hive_id;
+ /* fixed per family */
+ u64 node_segment_size;
+ /* physical node (0-3) */
+ unsigned physical_node_id;
+ /* number of nodes (0-4) */
+ unsigned num_physical_nodes;
+ /* gpu list in the same hive */
+ struct list_head head;
+ bool supported;
+ struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ struct amdgpu_xgmi_ras *ras;
+ uint16_t max_speed;
+ uint8_t max_width;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
-int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw);
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
-static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
- struct amdgpu_device *bo_adev)
-{
- return (amdgpu_use_xgmi_p2p &&
- adev != bo_adev &&
- adev->gmc.xgmi.hive_id &&
- adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
-}
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev);
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
@@ -86,5 +119,15 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int req_nps_mode);
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
+uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index b4f9c2f4e92c..3cdb1e0eca37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -23,26 +23,84 @@
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
-/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
-#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
-#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+#define AMD_SRIOV_MSG_SIZE_KB 1
+
/*
- * layout
+ * layout v1
* 0 64KB 65KB 66KB 68KB 132KB
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
-#define AMD_SRIOV_MSG_SIZE_KB 1
-#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
-#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+/*
+ * layout v2 (offsets are dynamically allocated and the offsets below are examples)
+ * 0 1KB 64KB 65KB 66KB 68KB 132KB
+ * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ...
+ *
+ * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously)
+ */
+
+/* v1 layout sizes */
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+
+/* v1 offsets */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1)
+
+enum amd_sriov_crit_region_version {
+ GPU_CRIT_REGION_V1 = 1,
+ GPU_CRIT_REGION_V2 = 2,
+};
+
+/* v2 layout offset enum (in order of allocation) */
+enum amd_sriov_msg_table_id_enum {
+ AMD_SRIOV_MSG_IPD_TABLE_ID = 0,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
+ AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID,
+ AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID,
+ AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID,
+ AMD_SRIOV_MSG_INITD_H_TABLE_ID,
+ AMD_SRIOV_MSG_MAX_TABLE_ID,
+};
+
+struct amd_sriov_msg_init_data_header {
+ char signature[4]; /* "INDA" */
+ uint32_t version;
+ uint32_t checksum;
+ uint32_t initdata_offset; /* 0 */
+ uint32_t initdata_size_in_kb; /* 5MB */
+ uint32_t valid_tables;
+ uint32_t vbios_img_offset;
+ uint32_t vbios_img_size_in_kb;
+ uint32_t dataexchange_offset;
+ uint32_t dataexchange_size_in_kb;
+ uint32_t ras_tele_info_offset;
+ uint32_t ras_tele_info_size_in_kb;
+ uint32_t ip_discovery_offset;
+ uint32_t ip_discovery_size_in_kb;
+ uint32_t bad_page_info_offset;
+ uint32_t bad_page_size_in_kb;
+ uint32_t reserved[8];
+};
/*
* PF2VF history log:
@@ -97,21 +155,25 @@ union amd_sriov_msg_feature_flags {
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
uint32_t av1_support : 1;
- uint32_t vcn_rb_decouple : 1;
+ uint32_t vcn_rb_decouple : 1;
uint32_t mes_info_dump_enable : 1;
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
- uint32_t reserved : 21;
+ uint32_t ras_cper : 1;
+ uint32_t xgmi_ta_ext_peer_link : 1;
+ uint32_t reserved : 19;
} flags;
uint32_t all;
};
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
} flags;
uint32_t all;
};
@@ -137,8 +199,9 @@ union amd_sriov_ras_caps {
uint64_t block_jpeg : 1;
uint64_t block_ih : 1;
uint64_t block_mpio : 1;
+ uint64_t block_mmsch : 1;
uint64_t poison_propogation_mode : 1;
- uint64_t reserved : 44;
+ uint64_t reserved : 43;
} bits;
uint64_t all;
};
@@ -328,21 +391,29 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_READY_TO_RESET = 201,
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
+ MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
};
/* mailbox message send from host to guest */
enum amd_sriov_mailbox_response_message {
- MB_RES_MSG_CLR_MSG_BUF = 0,
- MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
- MB_RES_MSG_FLR_NOTIFICATION,
- MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
- MB_RES_MSG_SUCCESS,
- MB_RES_MSG_FAIL,
- MB_RES_MSG_QUERY_ALIVE,
- MB_RES_MSG_GPU_INIT_DATA_READY,
- MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
-
- MB_RES_MSG_TEXT_MESSAGE = 255
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION = 2,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION = 3,
+ MB_RES_MSG_SUCCESS = 4,
+ MB_RES_MSG_FAIL = 5,
+ MB_RES_MSG_QUERY_ALIVE = 6,
+ MB_RES_MSG_GPU_INIT_DATA_READY = 7,
+ MB_RES_MSG_RAS_POISON_READY = 8,
+ MB_RES_MSG_PF_SOFT_FLR_NOTIFICATION = 9,
+ MB_RES_MSG_GPU_RMA = 10,
+ MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
+ MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
+ MB_RES_MSG_TEXT_MESSAGE = 255
};
enum amd_sriov_ras_telemetry_gpu_block {
@@ -386,11 +457,25 @@ struct amd_sriov_ras_telemetry_error_count {
} block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
};
+struct amd_sriov_ras_cper_dump {
+ uint32_t more;
+ uint64_t overflow_count;
+ uint64_t count;
+ uint64_t wptr;
+ uint32_t buf[];
+};
+
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
+ struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
} body;
};
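As a reading aid (not part of the patch), the new v2 critical-region layout is self-describing: a consumer locates struct amd_sriov_msg_init_data_header, checks the "INDA" signature, and then uses the per-table offset/size pairs instead of the fixed v1 offsets. The sketch below assumes only the fields declared above; the trimmed struct, parse_init_data helper and example values are hypothetical, and checksum handling and actual table parsing are omitted.

/* Standalone sketch of validating and reading a v2-style init data header. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct init_data_header {
	char signature[4];		/* "INDA" */
	uint32_t version;
	uint32_t vbios_img_offset;
	uint32_t vbios_img_size_in_kb;
	uint32_t dataexchange_offset;
	uint32_t dataexchange_size_in_kb;
};

static int parse_init_data(const struct init_data_header *hdr)
{
	if (memcmp(hdr->signature, "INDA", 4) != 0)
		return -1;		/* no header: fall back to the fixed v1 layout */

	printf("v%u: vbios @%u (%u KB), dataexchange @%u (%u KB)\n",
	       (unsigned)hdr->version,
	       (unsigned)hdr->vbios_img_offset, (unsigned)hdr->vbios_img_size_in_kb,
	       (unsigned)hdr->dataexchange_offset, (unsigned)hdr->dataexchange_size_in_kb);
	return 0;
}

int main(void)
{
	struct init_data_header hdr = {
		.signature = { 'I', 'N', 'D', 'A' },
		.version = 2,
		.vbios_img_offset = 1, .vbios_img_size_in_kb = 64,
		.dataexchange_offset = 65, .dataexchange_size_in_kb = 4,
	};
	return parse_init_data(&hdr);
}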
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index e157d6d857b6..f9e2edf5260b 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -29,12 +29,11 @@
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
#define XCP_INST_MASK(num_inst, xcp_id) \
(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-#define AMDGPU_XCP_OPS_KFD (1 << 0)
-
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
int i;
@@ -62,233 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
- return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
- uint32_t inst_idx, struct amdgpu_ring *ring)
-{
- int xcp_id;
- enum AMDGPU_XCP_IP_BLOCK ip_blk;
- uint32_t inst_mask;
-
- ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return;
-
- inst_mask = 1 << inst_idx;
-
- switch (ring->funcs->type) {
- case AMDGPU_HW_IP_GFX:
- case AMDGPU_RING_TYPE_COMPUTE:
- case AMDGPU_RING_TYPE_KIQ:
- ip_blk = AMDGPU_XCP_GFX;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ip_blk = AMDGPU_XCP_SDMA;
- break;
- case AMDGPU_RING_TYPE_VCN_ENC:
- case AMDGPU_RING_TYPE_VCN_JPEG:
- ip_blk = AMDGPU_XCP_VCN;
- break;
- default:
- DRM_ERROR("Not support ring type %d!", ring->funcs->type);
- return;
- }
-
- for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
- if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
- ring->xcp_id = xcp_id;
- dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
- ring->xcp_id);
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
- break;
- }
- }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
- struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- unsigned int sel_xcp_id)
-{
- unsigned int *num_gpu_sched;
-
- num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
- .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
- adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
- .sched[(*num_gpu_sched)++] = &ring->sched;
- DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
- sel_xcp_id, ring->funcs->type,
- ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
- struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- int i;
-
- for (i = 0; i < MAX_XCP; i++) {
- atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
- memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
- }
-
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- ring = adev->rings[i];
- if (!ring || !ring->sched.ready || ring->no_scheduler)
- continue;
-
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
- /* VCN may be shared by two partitions under CPX MODE in certain
- * configs.
- */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- aqua_vanjaram_xcp_vcn_shared(adev))
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
- }
-
- return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
- ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
- aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
- else
- aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
- }
-
- return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
- struct amdgpu_device *adev,
- u32 hw_ip,
- u32 hw_prio,
- struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds,
- struct drm_gpu_scheduler ***scheds)
-{
- u32 sel_xcp_id;
- int i;
-
- if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
- u32 least_ref_cnt = ~0;
-
- fpriv->xcp_id = 0;
- for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
- u32 total_ref_cnt;
-
- total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
- if (total_ref_cnt < least_ref_cnt) {
- fpriv->xcp_id = i;
- least_ref_cnt = total_ref_cnt;
- }
- }
- }
- sel_xcp_id = fpriv->xcp_id;
-
- if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
- *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
- *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
- atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
- DRM_DEBUG("Selected partition #%d", sel_xcp_id);
- } else {
- DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
- return -ENOENT;
- }
-
- return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- int8_t inst)
-{
- int8_t dev_inst;
-
- switch (block) {
- case GC_HWIP:
- case SDMA0_HWIP:
- /* Both JPEG and VCN as JPEG is only alias of VCN */
- case VCN_HWIP:
- dev_inst = adev->ip_map.dev_inst[block][inst];
- break;
- default:
- /* For rest of the IPs, no look up required.
- * Assume 'logical instance == physical instance' for all configs. */
- dev_inst = inst;
- break;
- }
-
- return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- uint32_t mask)
-{
- uint32_t dev_mask = 0;
- int8_t log_inst, dev_inst;
-
- while (mask) {
- log_inst = ffs(mask) - 1;
- dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
- dev_mask |= (1 << dev_inst);
- mask &= ~(1 << log_inst);
- }
-
- return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type ip_block,
- uint32_t inst_mask)
-{
- int l = 0, i;
-
- while (inst_mask) {
- i = ffs(inst_mask) - 1;
- adev->ip_map.dev_inst[ip_block][l++] = i;
- inst_mask &= ~(1 << i);
- }
- for (; l < HWIP_MAX_INSTANCE; l++)
- adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
- u32 ip_map[][2] = {
- { GC_HWIP, adev->gfx.xcc_mask },
- { SDMA0_HWIP, adev->sdma.sdma_mask },
- { VCN_HWIP, adev->vcn.inst_mask },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
- aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
- adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
- adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
/* Fixed pattern for smn addressing on different AIDs:
* bit[34]: indicate cross AID access
* bit[33:32]: indicate target AID id
@@ -352,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->nbio.funcs->get_compute_partition_mode) {
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
- if (mode != derv_mode)
+ if (mode != derv_mode) {
dev_warn(
adev->dev,
"Mismatch in compute partition mode - reported : %d derived : %d",
mode, derv_mode);
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
}
return mode;
@@ -447,52 +222,74 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
-static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
- int mode,
- struct amdgpu_xcp_cfg *xcp_cfg)
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int max_res[AMDGPU_XCP_RES_MAX] = {};
- bool res_lt_xcp;
- int num_xcp, i;
- u16 nps_modes;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
- max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
- max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
- max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
- max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
-
- switch (mode) {
+ switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcp = 1;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcp = 3;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcp = 4;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
- num_xcp = NUM_XCC(adev->gfx.xcc_mask);
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
xcp_cfg->num_res = ARRAY_SIZE(max_res);
@@ -541,27 +338,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int num_xcc, num_xccs_per_xcp;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
+ return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 3) &&
- ((num_xcc % 3) == 0);
+ return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 4) &&
- (num_xccs_per_xcp >= 2);
+ return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
- return ((num_xcc > 1) &&
- (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
- (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ return comp_mode && (num_xcc > 1);
default:
return false;
}
@@ -569,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return false;
}
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- /* TODO:
- * Stop user queues and threads, and make sure GPU is empty of work.
- */
-
- if (flags & AMDGPU_XCP_OPS_KFD)
- amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
- return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- int ret = 0;
-
- if (flags & AMDGPU_XCP_OPS_KFD) {
- amdgpu_amdkfd_device_probe(xcp_mgr->adev);
- amdgpu_amdkfd_device_init(xcp_mgr->adev);
- /* If KFD init failed, return failure */
- if (!xcp_mgr->adev->kfd.init_complete)
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void
-__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
-{
- struct amdgpu_device *adev = xcp_mgr->adev;
-
- xcp_mgr->supp_xcp_modes = 0;
-
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_QPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 6:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_TPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 4:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- /* this seems only existing in emulation phase */
- case 2:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 1:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
-
- default:
- break;
- }
-}
-
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
int mode;
@@ -672,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return -EINVAL;
}
- if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+ !adev->in_suspend)
flags |= AMDGPU_XCP_OPS_KFD;
if (flags & AMDGPU_XCP_OPS_KFD) {
@@ -681,7 +417,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
- ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
if (ret)
goto unlock;
@@ -694,7 +430,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
- ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
if (!ret)
__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
@@ -777,9 +513,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
- .select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list =
- &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -794,7 +527,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
- __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
@@ -834,7 +567,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
if (ret)
return ret;
- aqua_vanjaram_ip_map_init(adev);
+ amdgpu_ip_map_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 81d195d366ce..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
ectx.ws_size = ws;
} else {
ectx.ws = NULL;
@@ -1444,6 +1448,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
if (vbios_str == NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
+ OPTIMIZER_HIDE_VAR(vbios_str);
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
@@ -1493,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx)
}
}
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
@@ -1553,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index b807f6639a4c..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -37,6 +37,7 @@ struct drm_device;
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -151,6 +152,7 @@ struct atom_context {
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 622634c08c7b..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -430,7 +430,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
}
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
index f59d85eaddf0..3e24acf8133f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
@@ -32,7 +32,7 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 08d6787893b3..9cd63b4177bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2148,7 +2148,7 @@ static int cik_common_resume(struct amdgpu_ip_block *ip_block)
return cik_common_hw_init(ip_block);
}
-static bool cik_common_is_idle(void *handle)
+static bool cik_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 444563486769..876a3256dba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
/* When a ring buffer overflow happen start parsing interrupt
@@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
@@ -345,9 +357,9 @@ static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
return cik_ih_hw_init(ip_block);
}
-static bool cik_ih_is_idle(void *handle)
+static bool cik_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d9bd8f3f17e2..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
@@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
@@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
struct amdgpu_device *adev = ip_block->adev;
- r = cik_sdma_start(adev);
- if (r)
- return r;
-
- return r;
+ return cik_sdma_start(adev);
}
static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1025,9 +1019,9 @@ static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
return cik_sdma_hw_init(ip_block);
}
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(void *handle)
static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 06088d52d81c..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -51,8 +51,14 @@
#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..ed1e25661706
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks, only initialize the blocks needed by the driver */
+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 82586b76aeda..bc7a2e06ab5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
@@ -341,9 +351,9 @@ static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
return cz_ih_hw_init(ip_block);
}
-static bool cz_ih_is_idle(void *handle)
+static bool cz_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c5e3d2251b18..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1462,17 +1461,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
@@ -2970,7 +2964,7 @@ static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v10_0_is_idle(void *handle)
+static bool dce_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -3075,7 +3069,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3227,7 +3221,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index ea42a4472bf6..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3824 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "amdgpu_display.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "ivsrcid/ivsrcid_vislands30.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_device_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_device_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_device_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_device_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- amdgpu_device_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- * @async: asynchronous flip
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update pitch */
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
- fb->pitches[0] / fb->format->cpp[0]);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
- /*Disable VGA render and enabled crtc, if has DCE engine*/
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /*Disable crtc*/
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Setup up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controllers.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need use to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
- (u32)mode->clock);
- line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
- (u32)mode->clock);
- line_time = min_t(u32, line_time, 65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_display_update_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
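- /* per the HD Audio pin configuration defaults, a port connectivity
-  * field value of 1 indicates "no physical connection"
-  */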
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- if (sad_count <= 0)
- return;
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-
-}
-
-/*
- * build a HDMI Video Info Frame
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
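- /* hdmi_avi_infoframe_pack() emits type, version, length and checksum
-  * followed by the payload, so frame[] starts at the checksum byte and
-  * header[1] (the InfoFrame version) fills the top byte of AVI_INFO3
-  */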
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (a ratio of two integers). DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
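- /* For instance, a 148.5 MHz pixel clock (mode->clock = 148500 kHz)
-  * gives PHASE = 24000 and MODULE = 148500, i.e. the audio DTO runs
-  * at 24000/148500 of the pixel clock.
-  */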
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
- /* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
- /* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
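- /* IEC 60958 channel status numbering is 1-based, so map L/R to 1/2
-  * and the remaining channels to 3-8
-  */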
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
- /* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] =
-{
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic)
- target_fb = fb;
- else
- target_fb = crtc->primary->fb;
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = target_fb->obj[0];
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (!atomic) {
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
- fb_location = amdgpu_bo_gpu_offset(abo);
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- default:
- DRM_ERROR("Unsupported screen format %p4cc\n",
- &target_fb->format->format);
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
- * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
- * retain the full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
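- /* align the viewport start, x to a 4-pixel and y to a 2-line
-  * boundary (presumably a hardware alignment requirement)
-  */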
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u16 *r, *g, *b;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
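- /* pack the top 10 bits of each 16-bit gamma entry into a single
-  * DC_LUT_30_COLOR word: red in bits 29:20, green in 19:10, blue in 9:0
-  */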
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- ((*r++ & 0xffc0) << 14) |
- ((*g++ & 0xffc0) << 4) |
- (*b++ >> 6));
- }
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 10.x
- * Tonga
- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
- * CI
- * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->flags & AMD_IS_APU) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
- return ATOM_PPLL_INVALID;
-}
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
- /* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_put(obj);
- return ret;
- }
-
- aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put(obj);
- return ret;
- }
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
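- /* offset the stored position so the visible cursor tip stays put
-  * when the hotspot changes
-  */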
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_put(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_display_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_display_crtc_page_flip_target,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_display_crtc_idx_to_irq_type(adev,
- amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_dpm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
- /* another crtc is still using this pll, don't turn
- * it off
- */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
- /* update the hw mode for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .disable = dce_v11_0_crtc_disable,
- .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static void dce_v11_0_panic_flush(struct drm_plane *plane)
-{
- struct drm_framebuffer *fb;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_device *adev;
- uint32_t fb_format;
-
- if (!plane->fb)
- return;
-
- fb = plane->fb;
- amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
- adev = drm_to_adev(fb->dev);
-
- /* Disable DC tiling */
- fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-
-}
-
-static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
- .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
- .panic_flush = dce_v11_0_panic_flush,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
- drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
-
- return 0;
-}
-
-static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_STONEY:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- dce_v11_0_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
-{
- int r, i;
- struct amdgpu_device *adev = ip_block->adev;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev_to_drm(adev)->mode_config.async_page_flip = true;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
- adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
- r = amdgpu_display_modeset_create_props(adev);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev_to_drm(adev));
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- INIT_DELAYED_WORK(&adev->hotplug_work,
- amdgpu_display_hotplug_work_func);
-
- drm_kms_helper_poll_init(adev_to_drm(adev));
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- drm_edid_free(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev_to_drm(adev));
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev_to_drm(adev));
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* disable vga render */
- dce_v11_0_set_vga_render_state(adev, false);
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- flush_delayed_work(&adev->hotplug_work);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int r;
-
- r = amdgpu_display_suspend_helper(adev);
- if (r)
- return r;
-
- adev->mode_info.bl_level =
- amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
- return dce_v11_0_hw_fini(ip_block);
-}
-
-static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
-
- amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
- adev->mode_info.bl_level);
-
- ret = dce_v11_0_hw_init(ip_block);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
- if (ret)
- return ret;
-
- return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_v11_0_is_idle(void *handle)
-{
- return true;
-}
-
-static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = ip_block->adev;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
-		DRM_DEBUG("invalid hpd %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
- /* IRQ could occur when in initial stage */
- if(amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
- "AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
-	/* wakeup userspace */
- if(works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
-		DRM_DEBUG("invalid hpd %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
- crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev_to_drm(adev), crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_delayed_work(&adev->hotplug_work, 0);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- /* set scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = adev->mode_info.num_hpd;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
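
The removed dce_v11_0 pageflip handler recovers the CRTC index from the interrupt source id with crtc_id = (entry->src_id - 8) >> 1, which pairs with the sw_init loop that registers pageflip sources from VISLANDS30_IV_SRCID_D1_GRPH_PFLIP upward in steps of two. The standalone sketch below models only that mapping; it assumes the D1 graphics pageflip source id is 8 (as the handler's arithmetic implies) and the constant and helper names are hypothetical, not taken from the kernel tree.

#include <assert.h>
#include <stdio.h>

/* Assumed value: the dce_v11_0 handler subtracts 8, which implies the
 * D1 graphics pageflip source id is 8; D2..D6 then follow at 10..18. */
#define D1_GRPH_PFLIP_SRCID 8

/* Map an interrupt source id back to a CRTC index, mirroring the
 * (src_id - 8) >> 1 arithmetic in the removed pageflip handler. */
static unsigned int pflip_srcid_to_crtc(unsigned int src_id)
{
	return (src_id - D1_GRPH_PFLIP_SRCID) >> 1;
}

int main(void)
{
	unsigned int src_id, crtc;

	/* Mirror the sw_init registration loop: sources 8, 10, ..., 18. */
	for (src_id = D1_GRPH_PFLIP_SRCID; src_id < 20; src_id += 2) {
		crtc = pflip_srcid_to_crtc(src_id);
		printf("src_id %u -> crtc %u\n", src_id, crtc);
		assert(crtc < 6);
	}
	return 0;
}
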
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 915804a6a1d7..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -40,18 +40,25 @@
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
+#include "dce_v6_0.h"
+#include "sid.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
-#include "gca/gfx_7_2_enum.h"
-#include "dce_v6_0.h"
+
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -59,31 +66,31 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET
};
static const u32 hpd_offsets[] =
{
- mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
};
static const uint32_t dig_offsets[] = {
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET,
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET,
(0x13830 - 0x7030) >> 2,
};
@@ -206,9 +213,9 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
+ /* writing to the low address triggers the update */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
-
/* post the write */
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
@@ -218,11 +225,11 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
+
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
-
}
/**
@@ -242,7 +249,8 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
if (hpd >= adev->mode_info.num_hpd)
return connected;
- if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
connected = true;
return connected;
@@ -279,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -370,13 +378,41 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
+ crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
-
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -419,7 +455,6 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -895,8 +930,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
- if (adev->pm.dpm_enabled) {
/* watermark for low clocks */
+ if (adev->pm.dpm_enabled) {
wm_low.yclk =
amdgpu_dpm_get_mclk(adev, true) * 10;
wm_low.sclk =
@@ -976,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -999,13 +1034,26 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/* watermark setup */
+/**
+ * dce_v6_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ * @other_mode: the display mode of another display controller
+ * that may be sharing the line buffer
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (CIK).
+ * Returns the line buffer size in pixels.
+ */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
struct drm_display_mode *mode,
@@ -1040,7 +1088,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1257,6 +1305,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
@@ -1278,6 +1327,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
@@ -1299,7 +1353,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
+ u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
@@ -1309,12 +1363,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1325,13 +1379,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
}
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
}
kfree(sads);
-
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
@@ -1347,13 +1401,13 @@ static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
static const u32 pin_offsets[7] =
{
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v6_0_audio_init(struct amdgpu_device *adev)
@@ -1386,6 +1440,8 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].offset = pin_offsets[i];
adev->mode_info.audio.pin[i].id = i;
+ /* disable audio. it will be set up later */
+ /* XXX remove once we switch to ip funcs */
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
@@ -1394,17 +1450,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
@@ -1835,7 +1886,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1875,76 +1926,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
- fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
- GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
default:
@@ -1962,18 +2013,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -1989,7 +2040,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
@@ -2057,14 +2108,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -2074,15 +2124,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -2109,19 +2159,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -2216,8 +2266,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
-
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
@@ -2234,7 +2282,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2545,7 +2592,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2618,7 +2664,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane)
/* Disable DC tiling */
fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
}
@@ -2694,7 +2740,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2738,8 +2783,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2865,14 +2909,35 @@ static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v6_0_is_idle(void *handle)
+static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ u32 srbm_soft_reset = 0, tmp;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (dce_v6_0_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
return 0;
}
@@ -2889,22 +2954,22 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (crtc) {
case 0:
- reg_block = SI_CRTC0_REGISTER_OFFSET;
+ reg_block = CRTC0_REGISTER_OFFSET;
break;
case 1:
- reg_block = SI_CRTC1_REGISTER_OFFSET;
+ reg_block = CRTC1_REGISTER_OFFSET;
break;
case 2:
- reg_block = SI_CRTC2_REGISTER_OFFSET;
+ reg_block = CRTC2_REGISTER_OFFSET;
break;
case 3:
- reg_block = SI_CRTC3_REGISTER_OFFSET;
+ reg_block = CRTC3_REGISTER_OFFSET;
break;
case 4:
- reg_block = SI_CRTC4_REGISTER_OFFSET;
+ reg_block = CRTC4_REGISTER_OFFSET;
break;
case 5:
- reg_block = SI_CRTC5_REGISTER_OFFSET;
+ reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_DEBUG("invalid crtc %d\n", crtc);
@@ -2914,12 +2979,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -2934,28 +2999,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -2964,7 +3029,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3024,7 +3089,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3035,7 +3100,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3049,7 +3114,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3100,7 +3165,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3148,7 +3213,6 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
return 0;
-
}
static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
@@ -3178,12 +3242,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3203,7 +3265,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3243,7 +3304,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -3254,7 +3314,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3281,8 +3340,7 @@ static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -3294,8 +3352,7 @@ static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
@@ -3366,7 +3423,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->devices |= supported_device;
return;
}
-
}
/* add a new one */
@@ -3473,17 +3529,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f2edc0fece5b..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1395,13 +1394,13 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
}
static const u32 pin_offsets[7] = {
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
@@ -1443,17 +1442,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
@@ -2887,7 +2881,7 @@ static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v8_0_is_idle(void *handle)
+static bool dce_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -3021,7 +3015,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
}
-static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3029,7 +3023,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ DRM_DEBUG("invalid hpd %d\n", type);
return 0;
}
@@ -3051,7 +3045,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3136,7 +3130,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3547,17 +3541,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
- .set = dce_v8_0_set_crtc_interrupt_state,
+ .set = dce_v8_0_set_crtc_irq_state,
.process = dce_v8_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
- .set = dce_v8_0_set_pageflip_interrupt_state,
+ .set = dce_v8_0_set_pageflip_irq_state,
.process = dce_v8_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
- .set = dce_v8_0_set_hpd_interrupt_state,
+ .set = dce_v8_0_set_hpd_irq_state,
.process = dce_v8_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 003522c2d902..d75b9940f248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,11 +40,11 @@
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
-#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
+#include "gfx_v10_0_cleaner_shader.h"
#include "nbio_v2_3.h"
/*
@@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+ /* gfx header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -3789,12 +3818,65 @@ static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
gfx_v10_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}
+static void gfx_v10_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+ uint32_t tmp;
+
+ /* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ } else if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, mmCP_VMID_RESET, tmp);
+
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+}
+
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx10_kiq_set_resources,
.kiq_map_queues = gfx10_kiq_map_queues,
.kiq_unmap_queues = gfx10_kiq_unmap_queues,
.kiq_query_status = gfx10_kiq_query_status,
.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v10_0_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -3993,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@@ -4240,12 +4322,9 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -4253,39 +4332,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
@@ -4699,6 +4754,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
@@ -4708,7 +4766,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -4723,7 +4781,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -4738,6 +4796,69 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_10_1_10_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 101 &&
+ adev->gfx.pfp_fw_version >= 158 &&
+ adev->gfx.mec_fw_version >= 151) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 64 &&
+ adev->gfx.pfp_fw_version >= 100 &&
+ adev->gfx.mec_fw_version >= 122) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 6):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 14 &&
+ adev->gfx.pfp_fw_version >= 17 &&
+ adev->gfx.mec_fw_version >= 24) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 9 &&
+ adev->gfx.mec_fw_version >= 12) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4798,7 +4919,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
/* set up the gfx ring */
for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
continue;
@@ -4830,11 +4951,16 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
+
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -6026,7 +6152,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6104,7 +6230,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6181,7 +6307,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6556,7 +6682,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -6763,22 +6889,9 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *ring;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
return r;
}
@@ -7085,55 +7198,24 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v10_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v10_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i],
+ false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
@@ -7449,6 +7531,8 @@ static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
@@ -7493,9 +7577,9 @@ static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v10_0_hw_init(ip_block);
}
-static bool gfx_v10_0_is_idle(void *handle)
+static bool gfx_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -7584,19 +7668,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -8413,9 +8495,9 @@ static int gfx_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v10_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_FGCG */
@@ -8966,21 +9048,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
-static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, mmSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -9442,7 +9509,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9452,15 +9521,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
u64 addr;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
@@ -9480,37 +9548,37 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
0, 1, 0x20);
gfx_v10_0_ring_emit_reg_wait(kiq_ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("fail to resv mqd_obj\n");
+ r = gfx_v10_0_kgq_init_queue(ring, true);
+ if (r) {
+ DRM_ERROR("fail to init kgq\n");
return r;
}
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
}
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
return r;
- }
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9518,12 +9586,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -9534,9 +9601,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
0, 0);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9559,20 +9625,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v10_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
@@ -9583,13 +9638,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -9621,9 +9675,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_10[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_10[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -9684,9 +9743,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_10[reg]));
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
@@ -9730,6 +9793,20 @@ static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v10_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v10_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9801,12 +9878,11 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v10_0_ring_begin_use,
+ .end_use = gfx_v10_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9842,12 +9918,11 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v10_0_ring_begin_use,
+ .end_use = gfx_v10_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -9877,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
new file mode 100644
index 000000000000..f67569ccf9f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_10_1_10 */
+static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbf068100, 0xbf840023,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020102,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803000,
+ 0xbe813000, 0xbe823000,
+ 0xbe833000, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
+
+/* Define the cleaner shader gfx_10_3_0 */
+static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020002,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
new file mode 100644
index 000000000000..54f7ed9e2801
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 256-Dword cleaner shader.
+
+// GFX10.1 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10.1)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024clks vgpr + 640lds = 1660 clks)
+//
+ S_BARRIER
+ s_cmp_eq_u32 s0, 1 // If bit0 of sgpr0 is set, clear VGPRs and LDS, as FW sets COMPUTE_USER_DATA_0
+ s_cbranch_scc0 label_0023 // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s0 == 1)
+
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1 // sgpr1 has the tg_size (first_wave) term, as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_0023 // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x00000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, s0
+ s_movreld_b32 s1, s0
+ s_movreld_b32 s2, s0
+ s_movreld_b32 s3, s0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b64 vcc, 0 //clear vcc
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
new file mode 100644
index 000000000000..0e1c246166c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192-Dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and lower shader main to main_1
+
+// GFX10.3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024clks vgpr + 640lds = 1660 clks)
+//
+ S_BARRIER
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_0023 // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x00000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index fe5d7cd814f7..8a2ee2de390f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -29,7 +29,6 @@
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
-#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"
@@ -42,7 +41,6 @@
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15.h"
-#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
@@ -50,6 +48,8 @@
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX11_NUM_GFX_RINGS 1
#define GFX11_MEC_HPD_SIZE 2048
@@ -64,10 +64,28 @@
#define regPC_CONFIG_CNTL_1 0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX 1
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -98,6 +116,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
@@ -158,9 +180,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -211,7 +237,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
@@ -240,7 +275,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
@@ -551,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -561,33 +613,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -614,12 +651,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -725,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
+ else if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
@@ -811,12 +850,9 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -824,39 +860,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
@@ -1037,14 +1049,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
#define MQD_FWWORKAREA_SIZE 484
#define MQD_FWWORKAREA_ALIGNMENT 256
-static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
- if (adev->gfx.cp_gfx_shadow) {
- shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
- shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
- shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
- shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
return 0;
} else {
memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
@@ -1087,6 +1106,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1116,6 +1136,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ if (adev->gfx.disable_kq) {
+ ring->no_scheduler = true;
+ ring->no_user_submission = true;
+ }
if (!ring_id)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1548,29 +1572,26 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- int i, j, k, r, ring_id = 0;
+ int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 2;
- adev->gfx.mec.num_pipe_per_mec = 4;
- adev->gfx.mec.num_queue_per_pipe = 4;
- break;
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1589,6 +1610,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
+ adev->mes.fw_version[0] >= 120) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* add firmware version checks here */
+ if (0 && !adev->gfx.disable_uq) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 2280 &&
@@ -1603,6 +1653,63 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.mec_fw_version >= 26 &&
+ adev->mes.fw_version[0] >= 114) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 12 &&
+ adev->gfx.pfp_fw_version >= 15 &&
+ adev->gfx.mec_fw_version >= 15) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 7 &&
+ adev->gfx.pfp_fw_version >= 8 &&
+ adev->gfx.mec_fw_version >= 8) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1664,37 +1771,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v11_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ ring_id = 0;
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v11_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
- k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
+ continue;
- r = gfx_v11_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v11_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -1708,12 +1820,19 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
- (adev->gfx.mec_fw_version >= 2410)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2410) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
break;
}
@@ -1891,6 +2010,7 @@ static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1908,9 +2028,11 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap &= global_active_rb_bitmap;
@@ -2318,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
if (version_minor == 3)
gfx_v11_0_load_rlcp_rlcv_microcode(adev);
}
-
+
return 0;
}
@@ -2388,7 +2510,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2432,7 +2554,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2477,7 +2599,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -2923,7 +3045,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
IP_VERSION(11, 0, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
bootload_status = RREG32_SOC15(GC, 0,
regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
@@ -3112,7 +3235,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3330,7 +3453,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -3765,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
memcpy(fw, fw_data, fw_size);
-
+
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
@@ -3955,7 +4078,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
priority = 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
mqd->cp_gfx_hqd_queue_priority = tmp;
}
@@ -3977,14 +4100,14 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
@@ -3992,7 +4115,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -4014,16 +4137,20 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4035,11 +4162,21 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
+ mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4074,22 +4211,9 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *ring;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
return r;
}
@@ -4121,14 +4245,14 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -4157,7 +4281,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -4167,7 +4291,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -4175,8 +4299,12 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -4193,7 +4321,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4208,17 +4336,17 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -4228,6 +4356,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4411,57 +4543,24 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v11_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
+ gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
if (!amdgpu_async_gfx_ring)
gfx_v11_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
@@ -4514,11 +4613,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->gfx.disable_kq) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ /* we don't want to set ring->ready */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+ }
+ if (amdgpu_async_gfx_ring)
+ amdgpu_gfx_disable_kgq(adev, 0);
+ } else {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -4546,10 +4657,9 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -4727,16 +4837,63 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v11_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
- if (amdgpu_async_gfx_ring) {
+ if (amdgpu_async_gfx_ring &&
+ !adev->gfx.disable_kq) {
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -4775,9 +4932,9 @@ static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v11_0_hw_init(ip_block);
}
-static bool gfx_v11_0_is_idle(void *handle)
+static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -5062,11 +5219,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ /* We need one GFX ring temporarily to set up
+ * the clear state.
+ */
+ adev->gfx.num_gfx_rings = 1;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v11_0_set_kiq_pm4_funcs(adev);
gfx_v11_0_set_ring_funcs(adev);
@@ -5097,6 +5279,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
if (r)
return r;
+
+ r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -5445,6 +5632,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
@@ -5482,6 +5670,7 @@ static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@ -5515,6 +5704,7 @@ static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -5525,9 +5715,9 @@ static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_MGCG */
@@ -5674,8 +5864,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5686,15 +5874,11 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (vmid)
+ if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -5714,10 +5898,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
@@ -5775,8 +5955,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -5804,10 +5983,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -6036,28 +6212,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v10_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -6144,21 +6305,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -6296,25 +6442,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -6481,27 +6625,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -6609,32 +6755,90 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
-static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+
+static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
- if (r)
- return r;
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
}
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* FIXME: Sometimes the driver can't cache the ME firmware start PC correctly,
+ * so the pipe reset status relies on the later gfx ring test result.
+ */
+ return 0;
+}
+
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r) {
+ dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v11_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
- amdgpu_bo_unreserve(ring->mqd_obj);
+
+ r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kgq\n");
return r;
}
@@ -6644,37 +6848,159 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
-static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec1 fw pc: CP_MEC1_INSTR_PNTR */
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the MEC firmware start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* FIXME: Sometimes the driver can't cache the MEC firmware start PC correctly,
+ * so the pipe reset status relies on the compute ring test result.
+ */
+ return 0;
+}
+
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
+ r = gfx_v11_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v11_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
@@ -6683,7 +7009,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -6715,9 +7041,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_11[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_11[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -6777,9 +7108,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
/* ME0 is for GFX so start from 1 for CP */
soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_11[reg]));
+ if (i &&
+ gc_cp_reg_list_11[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0,
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_11[reg]));
}
index += reg_count;
}
@@ -6823,6 +7161,20 @@ static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.name = "gfx_v11_0",
.early_init = gfx_v11_0_early_init,
@@ -6893,12 +7245,11 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v11_0_ring_begin_use,
+ .end_use = gfx_v11_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6935,12 +7286,11 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v11_0_ring_begin_use,
+ .end_use = gfx_v11_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
@@ -6970,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 40c47a2dd060..d01d2712cf57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -29,7 +29,6 @@
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
-#include "amdgpu_atomfirmware.h"
#include "imu_v12_0.h"
#include "soc24.h"
#include "nvd.h"
@@ -37,21 +36,40 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "soc24_enum.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15.h"
-#include "soc15d.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX12_NUM_GFX_RINGS 1
#define GFX12_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00f00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
+
MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
@@ -61,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
@@ -117,11 +136,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
-
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -170,7 +192,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
@@ -199,7 +230,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
@@ -449,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -459,33 +507,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -512,12 +545,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -556,7 +587,7 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -583,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -649,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;
@@ -865,6 +900,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
soc24_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
+ return 0;
+ }
+
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -EINVAL;
+}
+
static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v12_0_select_se_sh,
@@ -873,6 +936,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1330,14 +1394,17 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
unsigned num_compute_rings;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 2;
+ adev->gfx.me.num_queue_per_pipe = 8;
+ adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
@@ -1352,41 +1419,67 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2780 &&
+ adev->gfx.pfp_fw_version >= 2840 &&
+ adev->gfx.mec_fw_version >= 3050 &&
+ adev->mes.fw_version[0] >= 123) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (adev->gfx.me_fw_version >= 2480 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2680 &&
+ adev->mes.fw_version[0] >= 100)
+ adev->gfx.enable_cleaner_shader = true;
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
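Reviewer note: a worked example of the recalculation above, assuming the GC 12.0.x topology set earlier in sw_init (2 pipes per MEC, 4 queues per pipe):

	/* num_compute_rings = min(requested, (2 * 4) / 2) = min(requested, 4),
	 * so at most 4 KCQs are exposed even if more were requested.
	 */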
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
+ GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
&adev->gfx.eop_irq);
if (r)
return r;
/* Bad opcode Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR,
&adev->gfx.bad_op_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -1407,46 +1500,63 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v12_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v12_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev,
- 0, i, k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev,
+ 0, i, k, j))
+ continue;
- r = gfx_v12_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v12_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if ((adev->gfx.me_fw_version >= 2660) &&
+ (adev->gfx.mec_fw_version >= 2920) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ break;
+ default:
+ break;
+ }
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
@@ -1615,6 +1725,7 @@ static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1632,12 +1743,14 @@ static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
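Reviewer note: an illustrative walk-through of the new masking, with values chosen only for the example (2 RBs per SA, 4 SAs, SA2 harvested):

	/*   rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(2)         = 0x3
	 *   active_sa_bitmap = 0b1011 (SA2 disabled)
	 *   per-SA expansion = 0x3 | (0x3 << 2) | (0x3 << 6)        = 0xcf
	 *   final mask       = 0xcf & global_active_rb_bitmap
	 * i.e. the SA-derived mask is now intersected with (not OR'ed into)
	 * the globally reported RB bitmap.
	 */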
@@ -2287,7 +2400,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2418,7 +2531,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
@@ -2431,7 +2544,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -2600,7 +2713,6 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- u32 i;
/* Set the write pointer delay */
WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
@@ -2655,12 +2767,6 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v12_0_cp_gfx_start(adev);
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-
return 0;
}
@@ -2872,25 +2978,25 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
mqd->cp_gfx_hqd_queue_priority = tmp;
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -2912,16 +3018,20 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -2933,11 +3043,19 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -2971,41 +3089,19 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
- int r, i;
- struct amdgpu_ring *ring;
+ int i, r;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kgq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
if (r)
- goto done;
+ return r;
}
r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
- goto done;
-
- r = gfx_v12_0_cp_gfx_start(adev);
- if (r)
- goto done;
+ return r;
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-done:
- return r;
+ return gfx_v12_0_cp_gfx_start(adev);
}
static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
@@ -3028,14 +3124,14 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3064,7 +3160,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -3074,15 +3170,19 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3099,7 +3199,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -3114,17 +3214,17 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -3134,6 +3234,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3318,57 +3422,25 @@ static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v12_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
+ gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
+ adev->gfx.kiq[0].ring.sched.ready = true;
return 0;
}
static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
if (!amdgpu_async_gfx_ring)
gfx_v12_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
@@ -3450,10 +3522,9 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -3624,14 +3695,60 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
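Reviewer note: for reference, the two call sites added by this patch pair up as below (shown only as a usage reminder):

	/* late_init: gfx_v12_0_set_userq_eop_interrupts(adev, true);
	 * hw_fini:   gfx_v12_0_set_userq_eop_interrupts(adev, false);
	 */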
+
static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
uint32_t tmp;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v12_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3674,9 +3791,9 @@ static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v12_0_hw_init(ip_block);
}
-static bool gfx_v12_0_is_idle(void *handle)
+static bool gfx_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -3720,11 +3837,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
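Reviewer note: the amdgpu_user_queue values map to queue modes as follows (a summary of the switch above):

	/*  -1, 0 (default): kernel queues only   (disable_kq = false, disable_uq = true)
	 *   1             : kernel + user queues (disable_kq = false, disable_uq = false)
	 *   2             : user queues only     (disable_kq = true,  disable_uq = false)
	 */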
+
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ adev->gfx.num_gfx_rings = 0;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v12_0_set_kiq_pm4_funcs(adev);
gfx_v12_0_set_ring_funcs(adev);
@@ -3755,6 +3894,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -4002,17 +4145,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
-
- data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
- data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
-
- /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
- if (adev->sdma.num_instances > 1) {
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
- }
}
}
@@ -4139,9 +4271,9 @@ static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_MGCG */
@@ -4205,45 +4337,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always being used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- if (ring->use_doorbell) {
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
- lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
- upper_32_bits(ring->wptr));
- }
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+ lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+ upper_32_bits(ring->wptr));
}
}
@@ -4268,42 +4372,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- /* XXX check if swapping is necessary on BE */
- if (ring->use_doorbell) {
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- BUG(); /* only DOORBELL method supported on gfx12 now */
- }
+ BUG(); /* only DOORBELL method supported on gfx12 now */
}
}
@@ -4344,16 +4420,10 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4373,10 +4443,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4416,8 +4482,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -4445,10 +4510,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -4639,21 +4701,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -4782,25 +4829,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -4967,27 +5012,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -5193,34 +5240,89 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
amdgpu_gfx_off_ctrl(adev, true);
}
-static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* Sometimes the ME start PC counter is not latched correctly, so the
+ * PC check is only a reference; the pipe reset result relies on the
+ * later ring test.
+ */
+ return 0;
+}
+
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
- dev_err(adev->dev, "reset via MES failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
+ r = gfx_v12_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kgq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kgq_init_queue(ring, true);
if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kgq\n");
return r;
}
@@ -5230,37 +5332,112 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
-static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r = 0;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ /* No F32 MEC instruction pointer register was found; assume the
+ * driver will not run in F32 mode.
+ */
+ }
+
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* The ring test is needed to verify the pipe reset result. */
+ return 0;
+}
+
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
+ r = gfx_v12_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v12_0_kcq_init_queue(ring, true);
if (r) {
- DRM_ERROR("fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "failed to init kcq\n");
return r;
}
r = amdgpu_mes_map_legacy_queue(adev, ring);
@@ -5269,7 +5446,21 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v12_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
}
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
@@ -5333,12 +5524,11 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v12_0_ring_begin_use,
+ .end_use = gfx_v12_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5372,12 +5562,11 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v12_0_ring_begin_use,
+ .end_use = gfx_v12_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
@@ -5407,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
};
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index f26e2cdec07a..80565392313f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -28,19 +28,33 @@
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "clearstate_si.h"
+#include "si.h"
+#include "sid.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
-#include "gca/gfx_7_2_enum.h"
+
#include "si_enums.h"
-#include "si.h"
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -72,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@@ -1721,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
gfx_v6_0_config_init(adev);
- WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
- (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
- WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ WREG32(mmCP_QUEUE_THRESHOLDS,
+ ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
sx_debug_1 = RREG32(mmSX_DEBUG_1);
WREG32(mmSX_DEBUG_1, sx_debug_1);
@@ -2336,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@@ -2837,47 +2855,23 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
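Reviewer note: the common CSB helpers are defined outside this hunk; based on the open-coded preamble removed above, a sketch of what amdgpu_gfx_csb_preamble_start() is expected to emit (an assumption, not taken from this patch):

	static u32 csb_preamble_start_sketch(u32 *buffer)
	{
		u32 count = 0;

		/* begin clear state, then a CONTEXT_CONTROL packet enabling
		 * load/shadow of context registers, as the removed code did */
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
		buffer[count++] = cpu_to_le32(0x80000000);
		buffer[count++] = cpu_to_le32(0x80000000);

		return count;
	}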
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
@@ -3108,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
@@ -3167,9 +3166,9 @@ static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v6_0_hw_init(ip_block);
}
-static bool gfx_v6_0_is_idle(void *handle)
+static bool gfx_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
@@ -3183,7 +3182,7 @@ static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v6_0_is_idle(adev))
+ if (gfx_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 84745b2453ab..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -55,6 +55,9 @@
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -880,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3879,70 +3882,24 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KAVERI:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_HAWAII:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
@@ -4442,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
gfx_v7_0_gpu_early_init(adev);
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
@@ -4515,9 +4477,9 @@ static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v7_0_hw_init(ip_block);
}
-static bool gfx_v7_0_is_idle(void *handle)
+static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
@@ -4926,76 +4888,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
-static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
-static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5045,7 +4937,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
- .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index af73f85527b7..1c87375e1dd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1220,51 +1220,24 @@ out:
return err;
}
-static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
- PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
@@ -2050,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return 0;
}
@@ -4666,6 +4644,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -4683,60 +4662,25 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v8_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v8_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (!r) {
- r = gfx_v8_0_kcq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]);
if (r)
- goto done;
+ return r;
}
gfx_v8_0_set_mec_doorbell_range(adev);
- r = gfx_v8_0_kiq_kcq_enable(adev);
- if (r)
- goto done;
-
-done:
- return r;
+ return gfx_v8_0_kiq_kcq_enable(adev);
}
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
@@ -4851,9 +4795,9 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
return r;
}
-static bool gfx_v8_0_is_idle(void *handle)
+static bool gfx_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
|| RREG32(mmGRBM_STATUS2) != 0x8)
@@ -4892,7 +4836,7 @@ static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(adev))
+ if (gfx_v8_0_is_idle(ip_block))
return 0;
udelay(1);
@@ -5452,9 +5396,9 @@ static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v8_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -5639,8 +5583,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
@@ -5734,8 +5676,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5745,8 +5685,6 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
@@ -5827,12 +5765,12 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
}
gfx_v8_0_wait_for_rlc_serdes(adev);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
@@ -5846,6 +5784,8 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -6404,34 +6344,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
-static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6908,48 +6820,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
-static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -7015,7 +6885,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
- .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -7075,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 4b5006dc3d34..0148d7ff34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
- SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
+ /* packet headers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
@@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};
enum ta_ras_gfx_subblock {
@@ -1269,6 +1296,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = false;
if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
@@ -1620,45 +1648,18 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
@@ -2233,6 +2234,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 167 &&
+ adev->gfx.pfp_fw_version >= 196 &&
+ adev->gfx.mec_fw_version >= 474) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(9, 4, 2):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
@@ -2389,6 +2409,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
@@ -2627,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -2637,7 +2662,10 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
+ WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ }
gfx_v9_0_tiling_mode_table_init(adev);
@@ -3887,55 +3915,23 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[0].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v9_0_kiq_init_queue(ring);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
return 0;
}
static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ int i, r;
gfx_v9_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
if (r)
- goto done;
+ return r;
}
- r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, 0);
}
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
@@ -4042,7 +4038,8 @@ static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
+ !amdgpu_sriov_vf(adev))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
@@ -4110,9 +4107,9 @@ static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_0_hw_init(ip_block);
}
-static bool gfx_v9_0_is_idle(void *handle)
+static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -4127,7 +4124,7 @@ static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_0_is_idle(adev))
+ if (gfx_v9_0_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -4177,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -4964,8 +4959,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5030,8 +5023,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -5042,8 +5033,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* Enable 3D CGCG/CGLS */
if (enable) {
/* write cmd to clear cgcg/cgls ov */
@@ -5085,8 +5074,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5094,8 +5081,6 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -5137,13 +5122,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS
* === MGCG + MGLS ===
@@ -5163,6 +5147,7 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === MGCG + MGLS === */
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
}
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5251,7 +5236,7 @@ static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 3, 0):
if (!enable)
- amdgpu_gfx_off_ctrl(adev, false);
+ amdgpu_gfx_off_ctrl_immediate(adev, false);
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
@@ -5273,10 +5258,10 @@ static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
if (enable)
- amdgpu_gfx_off_ctrl(adev, true);
+ amdgpu_gfx_off_ctrl_immediate(adev, true);
break;
case IP_VERSION(9, 2, 1):
- amdgpu_gfx_off_ctrl(adev, enable);
+ amdgpu_gfx_off_ctrl_immediate(adev, enable);
break;
default:
break;
@@ -5311,9 +5296,9 @@ static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -5478,16 +5463,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_ce_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
@@ -5510,16 +5487,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_de_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
@@ -5709,19 +5678,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- }
+ offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -5807,28 +5766,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
@@ -7229,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v9_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
- return -ENOMEM;
- gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v9_0_ring_emit_reg_wait(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
- gfx_v9_0_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -7283,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7325,20 +7224,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)){
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
+ r = gfx_v9_0_kcq_init_queue(ring, true);
if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ dev_err(adev->dev, "fail to init kcq\n");
return r;
}
spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -7349,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
DRM_ERROR("fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7387,9 +7275,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -7426,9 +7319,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_9[reg]));
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_9[reg]));
}
index += reg_count;
}
@@ -7442,11 +7339,49 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* Emit the cleaner shader */
- amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ else
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
+
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+
+ /* Raven and PCO APUs seem to have stability issues when compute
+ * is combined with gfxoff and gfx powergating. Disable gfx
+ * powergating during submission and re-enable it afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
+}
+
+static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ /* Raven and PCO APUs seem to have stability issues when compute
+ * is combined with gfxoff and gfx powergating. Disable gfx
+ * powergating during submission and re-enable it afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7518,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
- .reset = gfx_v9_0_reset_kgq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
@@ -7623,8 +7557,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
.reset = gfx_v9_0_reset_kcq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v9_0_ring_begin_use_compute,
+ .end_use = gfx_v9_0_ring_end_use_compute,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -7652,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f53b379d8971..6028afd81690 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -27,7 +27,6 @@
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
-#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"
#include "gc/gc_9_4_1_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index d81449f9d822..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
@@ -1547,7 +1559,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
{
uint32_t bank, way, mem;
static const char * const vml2_way_str[] = { "BIGK", "4K" };
- static const char * const utcl2_rounter_str[] = { "VMC", "APT" };
+ static const char * const utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1568,7 +1580,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
- bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index a5bdcaf7a081..cbb74ffc4792 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
@@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -349,18 +354,7 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
- } else {
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
- } else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
- }
- }
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
}
}
@@ -563,27 +557,21 @@ out:
return err;
}
-static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
-{
- return true;
-}
-
-static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
-{
- if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-}
-
static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
int err;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sjt_mec.bin", chip_name);
- else
+
+ if (err)
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
+ } else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
@@ -595,8 +583,6 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
- gfx_v9_4_3_check_if_need_gfxoff(adev);
-
out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
@@ -886,12 +872,13 @@ static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
switch (type) {
case ACA_SMU_TYPE_UE:
- ret = aca_error_cache_log_bank_error(handle, &info,
- ACA_ERROR_TYPE_UE, 1ULL);
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info,
- ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
return -EINVAL;
@@ -932,28 +919,15 @@ static const struct aca_info gfx_v9_4_3_aca_info = {
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
- u32 gb_addr_config;
-
adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
adev->gfx.ras = &gfx_v9_4_3_ras;
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- adev->gfx.config.max_hw_contexts = 8;
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
- break;
- default:
- BUG();
- break;
- }
-
- adev->gfx.config.gb_addr_config = gb_addr_config;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
REG_GET_FIELD(
@@ -1174,7 +1148,17 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if (adev->gfx.mec_fw_version >= 155) {
+ if ((adev->gfx.mec_fw_version >= 155) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
+ case IP_VERSION(9, 5, 0):
+ if ((adev->gfx.mec_fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -1293,6 +1277,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
+/* For ASICs that need the xnack chain and whose MEC version supports it, set
+ * the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to
+ * set the xnack_chain bit in SET_RESOURCES
+ */
+static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ return;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data);
+}
+
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1337,6 +1337,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1349,6 +1350,22 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* ToDo: GC 9.4.4 */
+ case IP_VERSION(9, 4, 3):
+ if (adev->gfx.mec_fw_version >= 184 &&
+ (amdgpu_sriov_reg_access_sq_config(adev) ||
+ !amdgpu_sriov_vf(adev)))
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 23)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -1364,10 +1381,8 @@ static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
/*
* Rlc save restore list is workable since v2_1.
- * And it's needed by gfxoff feature.
*/
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
+ gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}
static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
@@ -1852,7 +1867,7 @@ static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_MODE, 1);
} else {
@@ -2139,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
+static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
+ bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -2173,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
}
-
- return 0;
}
static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
@@ -2200,55 +2214,25 @@ static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_
static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_ring *ring;
- int r;
-
- ring = &adev->gfx.kiq[xcc_id].ring;
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- return r;
-
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
-
- gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
+ gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
return 0;
}
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_ring *ring = NULL;
- int r = 0, i;
+ struct amdgpu_ring *ring;
+ int i;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto done;
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r)
- goto done;
+ ring = &adev->gfx.compute_ring[i + xcc_id *
+ adev->gfx.num_compute_rings];
+
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
}
- r = amdgpu_gfx_enable_kcq(adev, xcc_id);
-done:
- return r;
+ return amdgpu_gfx_enable_kcq(adev, xcc_id);
}
static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
@@ -2307,7 +2291,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
} else {
- if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ if (adev->in_suspend)
+ amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+ else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE) ==
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
r = amdgpu_xcp_switch_partition_mode(
@@ -2410,9 +2396,9 @@ static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_4_3_hw_init(ip_block);
}
-static bool gfx_v9_4_3_is_idle(void *handle)
+static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
@@ -2430,7 +2416,7 @@ static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_4_3_is_idle(adev))
+ if (gfx_v9_4_3_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -2476,19 +2462,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -2790,22 +2774,16 @@ static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- for (i = 0; i < num_xcc; i++)
- gfx_v9_4_3_xcc_update_gfx_clock_gating(
- adev, state == AMD_CG_STATE_GATE, i);
- break;
- default:
- break;
- }
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_update_gfx_clock_gating(
+ adev, state == AMD_CG_STATE_GATE, i);
+
return 0;
}
-static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -3514,9 +3492,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
- /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->gfx.mec_fw_version >= 0x0000009b)
+ if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
@@ -3579,20 +3555,21 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
}
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
unsigned long flags;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -3618,30 +3595,19 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
pipe_reset:
- if(r) {
+ if (r) {
+ if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+ return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
+ reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
if (r)
return r;
}
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0)){
- dev_err(adev->dev, "fail to resv mqd_obj\n");
- return r;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- amdgpu_bo_kunmap(ring->mqd_obj);
- ring->mqd_ptr = NULL;
- }
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r) {
- dev_err(adev->dev, "fail to unresv mqd_obj\n");
- return r;
- }
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
@@ -3650,14 +3616,24 @@ pipe_reset:
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
+ goto pipe_reset;
+
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ goto pipe_reset;
+ }
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
enum amdgpu_gfx_cp_ras_mem_id {
@@ -4636,12 +4612,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri
"\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
xcc_id, i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p,
- "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9_4_3[reg].reg_name,
- adev->gfx.ip_dump_compute_queues
- [xcc_offset + inst_offset +
- reg]);
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ else
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
}
inst_offset += reg_count;
}
@@ -4690,12 +4675,20 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
GET_INST(GC, xcc_id));
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues
- [xcc_offset +
- inst_offset + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(
- gc_cp_reg_list_9_4_3[reg],
- GET_INST(GC, xcc_id)));
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
}
inst_offset += reg_count;
}
@@ -4803,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
};
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
@@ -4862,34 +4856,13 @@ static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
- /* init asci gds info */
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- /* 9.4.3 removed all the GDS internal memory,
- * only support GWS opcode in kernel, like barrier
- * semaphore.etc */
- adev->gds.gds_size = 0;
- break;
- default:
- adev->gds.gds_size = 0x10000;
- break;
- }
-
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- /* deprecated for 9.4.3, no usage at all */
- adev->gds.gds_compute_max_wave_id = 0;
- break;
- default:
- /* this really depends on the chip */
- adev->gds.gds_compute_max_wave_id = 0x7ff;
- break;
- }
+ /* 9.4.3 variants removed all the GDS internal memory and
+ * only support GWS opcodes in the kernel, e.g. barrier and
+ * semaphore. */
+ /* init asic gds info */
+ adev->gds.gds_size = 0;
+ adev->gds.gds_compute_max_wave_id = 0;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 0e3ddea7b8e0..a7bfc9f41d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 5470cef7e9bd..6c03bf9f1ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
int i;
@@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
/* In the case squeezing vram into GART aperture, we don't use
* FB aperture and AGP aperture. Disable them.
*/
- if (adev->gmc.pdb0_bo) {
+ if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
@@ -313,6 +315,16 @@ gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
}
}
+static inline bool
+gfxhub_v1_2_per_process_xnack_support(struct amdgpu_device *adev)
+{
+ /*
+ * TODO: Check if this function is really needed; so far only 9.4.3
+ * variants use GFXHUB 1.2.
+ */
+ return !!adev->aid_mask;
+}
+
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
@@ -355,7 +367,7 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
PAGE_TABLE_BLOCK_SIZE,
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm.
- * On 9.4.2 and 9.4.3, XNACK can be enabled in
+ * On 9.4.3 variants, XNACK can be enabled in
* the SQ per-process.
* Retry faults need to be enabled for that to work.
*/
@@ -363,14 +375,8 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 5, 0));
+ gfxhub_v1_2_per_process_xnack_support(
+ adev));
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 17509f32f61a..deb95fab02df 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -505,42 +505,6 @@ static void gfxhub_v2_1_init(struct amdgpu_device *adev)
hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
}
-static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
-{
- u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
- u32 max_region =
- REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
- u32 max_num_physical_nodes = 0;
- u32 max_physical_node_id = 0;
-
- switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
- case IP_VERSION(4, 8, 0):
- max_num_physical_nodes = 4;
- max_physical_node_id = 3;
- break;
- default:
- return -EINVAL;
- }
-
- /* PF_MAX_REGION=0 means xgmi is disabled */
- if (max_region) {
- adev->gmc.xgmi.num_physical_nodes = max_region + 1;
- if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
- return -EINVAL;
-
- adev->gmc.xgmi.physical_node_id =
- REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
- if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
- return -EINVAL;
-
- adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
- RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_SIZE),
- GCMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
- }
-
- return 0;
-}
-
static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
{
int i;
@@ -696,7 +660,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.gart_disable = gfxhub_v2_1_gart_disable,
.set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
.init = gfxhub_v2_1_init,
- .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
.mode2_save_regs = gfxhub_v2_1_save_regs,
.mode2_restore_regs = gfxhub_v2_1_restore_regs,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9bedca9a79c6..ce6e04242c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
struct amdgpu_task_info *task_info;
uint32_t status = 0;
u64 addr;
@@ -164,10 +166,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -268,7 +267,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -428,10 +427,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
@@ -473,24 +468,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -515,21 +492,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -570,7 +565,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
@@ -969,10 +963,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
@@ -1076,7 +1069,7 @@ static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v10_0_is_idle(void *handle)
+static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v10.*/
return true;
@@ -1115,9 +1108,9 @@ static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v2_0_set_clockgating(adev, state);
}
-static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72751ab4c766..ba59ee8e398a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -134,10 +163,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -229,7 +255,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -393,10 +419,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -437,24 +459,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -479,21 +483,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -534,7 +556,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
@@ -579,6 +600,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:
@@ -596,6 +618,7 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
break;
default:
@@ -750,6 +773,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
+ /* The mall_size is already calculated as mall_size_per_umc * num_umc.
+ * However, for gfx1151, which features a 2-to-1 UMC mapping,
+ * the result must be multiplied by 2 to determine the actual mall size.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 5, 1):
+ adev->gmc.mall_size *= 2;
+ break;
+ default:
+ break;
+ }
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -759,6 +794,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
@@ -829,7 +865,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
@@ -896,10 +932,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
@@ -984,7 +1019,7 @@ static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v11_0_is_idle(void *handle)
+static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v11.*/
return true;
@@ -1009,9 +1044,9 @@ static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v3_0_set_clockgating(adev, state);
}
-static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b749f1c3f6a9..7a9d6894e321 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub;
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
@@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
else
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -127,10 +156,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -297,7 +323,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -315,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
}
- mutex_lock(&adev->mman.gtt_window_lock);
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
- mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
@@ -339,6 +363,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+ /* MES will invalidate all gc_hub for the device from master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+			/* Only need to invalidate mm_hub now, gfx12 only supports one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -413,10 +453,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -460,20 +496,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -497,21 +519,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
@@ -519,25 +555,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
- (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -567,7 +589,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
@@ -837,7 +858,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
@@ -898,10 +919,9 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
@@ -984,7 +1004,7 @@ static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v12_0_is_idle(void *handle)
+static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v12.*/
return true;
@@ -1009,9 +1029,9 @@ static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v4_1_0_set_clockgating(adev, state);
}
-static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2245dda92021..a8ec95f42926 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+ amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -608,36 +610,33 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+ u32 status, u32 addr)
{
u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
- char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
- (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
- "write" : "read", block, mc_client, mc_id);
+ "write" : "read", mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -957,9 +955,9 @@ static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v6_0_is_idle(void *handle)
+static bool gmc_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -976,7 +974,7 @@ static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(adev))
+ if (gmc_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -1072,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
@@ -1079,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (!addr && !status)
return 0;
+ amdgpu_vm_update_fault_cache(adev, entry->pasid,
+ ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT,
+ status, AMDGPU_GFXHUB(0));
+
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
@@ -1089,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ gmc_v6_0_vm_decode_fault(adev, status, addr);
}
return 0;
@@ -1098,6 +1106,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9aac4b1101e3..fbd0bf147f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -1066,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1142,9 +1144,9 @@ static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v7_0_is_idle(void *handle)
+static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -1157,17 +1159,10 @@ static bool gmc_v7_0_is_idle(void *handle)
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v7_0_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -1266,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status, mc_client, vmid;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1295,7 +1296,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1311,8 +1312,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d06585207c33..6551b60f2584 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
@@ -1179,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1263,9 +1267,9 @@ static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v8_0_is_idle(void *handle)
+static bool gmc_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -1435,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1458,9 +1468,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -1476,7 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1492,8 +1500,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
@@ -1686,9 +1693,9 @@ static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v8_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 291549765c38..8ad7519f7b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -78,8 +78,6 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
-#define MAX_MEM_RANGES 8
-
static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
@@ -546,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
@@ -636,10 +636,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -647,9 +644,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (amdgpu_is_multi_aid(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -798,9 +793,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ amdgpu_is_multi_aid(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1082,27 +1075,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1130,8 +1102,9 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- struct amdgpu_bo_va_mapping *mapping,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1141,7 +1114,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
@@ -1171,7 +1143,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
- if (mapping->bo_va->is_xgmi)
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
@@ -1212,10 +1184,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
- mtype = is_local ? MTYPE_CC : MTYPE_UC;
- else
- mtype = MTYPE_UC;
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
@@ -1249,24 +1218,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_RW:
+ *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1278,9 +1266,8 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
* memory can use more efficient MTYPEs.
*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 5, 0))
+ if (!(adev->flags & AMD_IS_APU) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
@@ -1336,7 +1323,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
mtype_local = MTYPE_CC;
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
- } else if (adev->rev_id) {
+ } else {
/* MTYPE_UC case */
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
}
@@ -1388,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static enum amdgpu_memory_partition
-gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
-{
- enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev,
- supp_modes);
-
- return mode;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
-{
- switch (adev->gmc.num_mem_partitions) {
- case 0:
- return UNKNOWN_MEMORY_PARTITION_MODE;
- case 1:
- return AMDGPU_NPS1_PARTITION_MODE;
- case 2:
- return AMDGPU_NPS2_PARTITION_MODE;
- case 4:
- return AMDGPU_NPS4_PARTITION_MODE;
- default:
- return AMDGPU_NPS1_PARTITION_MODE;
- }
-
- return AMDGPU_NPS1_PARTITION_MODE;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
- return gmc_v9_0_query_vf_memory_partition(adev);
-
- return gmc_v9_0_get_memory_partition(adev, NULL);
-}
-
static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
@@ -1444,12 +1391,11 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
- .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
@@ -1498,14 +1444,13 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
break;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
adev->umc.max_ras_err_cnt_per_query =
UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.active_mask = adev->aid_mask;
- adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -1524,6 +1469,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v1_7_funcs;
break;
case IP_VERSION(1, 8, 0):
+ case IP_VERSION(1, 8, 1):
adev->mmhub.funcs = &mmhub_v1_8_funcs;
break;
default:
@@ -1556,9 +1502,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (amdgpu_is_multi_aid(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1595,23 +1539,38 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
{
+ enum amdgpu_memory_partition mode;
+ uint32_t supp_modes;
+ int i;
+
adev->gmc.supported_nps_modes = 0;
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- /*TODO: Check PSP version also which supports NPS switch. Otherwise keep
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware and supported modes available */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
+ while ((i = ffs(supp_modes))) {
+ if (AMDGPU_ALL_NPS_MASK & BIT(i))
+ adev->gmc.supported_nps_modes |= BIT(i);
+ supp_modes &= supp_modes - 1;
+ }
+ } else {
+		/*TODO: Also check that the PSP version supports NPS switch. Otherwise keep
* supported modes as 0.
*/
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- adev->gmc.supported_nps_modes =
- BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
- break;
- default:
- break;
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gmc.supported_nps_modes =
+ BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ break;
+ }
}
}
@@ -1625,9 +1584,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ amdgpu_is_multi_aid(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1636,8 +1593,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
}
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
enum amdgpu_pkg_type pkg_type =
adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3 APU, there is no physical VRAM domain present
@@ -1715,7 +1671,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
@@ -1830,7 +1786,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
adev->gmc.vmid0_page_table_depth = 1;
adev->gmc.vmid0_page_table_block_size = 12;
} else {
@@ -1856,7 +1812,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
r = amdgpu_gmc_pdb0_alloc(adev);
}
@@ -1878,192 +1834,25 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
-{
- enum amdgpu_memory_partition mode;
- u32 supp_modes;
- bool valid;
-
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
-
- /* Mode detected by hardware not present in supported modes */
- if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
- !(BIT(mode - 1) & supp_modes))
- return false;
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- case AMDGPU_NPS1_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 1);
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 2);
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 3 ||
- adev->gmc.num_mem_partitions == 4);
- break;
- default:
- valid = false;
- }
-
- return valid;
-}
-
-static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
-{
- int i;
-
- /* Check if node with id 'nid' is present in 'node_ids' array */
- for (i = 0; i < num_ids; ++i)
- if (node_ids[i] == nid)
- return true;
-
- return false;
-}
-
-static void
-gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- struct amdgpu_numa_info numa_info;
- int node_ids[MAX_MEM_RANGES];
- int num_ranges = 0, ret;
- int num_xcc, xcc_id;
- uint32_t xcc_mask;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- xcc_mask = (1U << num_xcc) - 1;
-
- for_each_inst(xcc_id, xcc_mask) {
- ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
- if (ret)
- continue;
-
- if (numa_info.nid == NUMA_NO_NODE) {
- mem_ranges[0].size = numa_info.size;
- mem_ranges[0].numa.node = numa_info.nid;
- num_ranges = 1;
- break;
- }
-
- if (gmc_v9_0_is_node_present(node_ids, num_ranges,
- numa_info.nid))
- continue;
-
- node_ids[num_ranges] = numa_info.nid;
- mem_ranges[num_ranges].numa.node = numa_info.nid;
- mem_ranges[num_ranges].size = numa_info.size;
- ++num_ranges;
- }
-
- adev->gmc.num_mem_partitions = num_ranges;
-}
-
-static void
-gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
+static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
- enum amdgpu_memory_partition mode;
- u32 start_addr = 0, size;
- int i, r, l;
-
- mode = gmc_v9_0_query_memory_partition(adev);
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 0;
- break;
- case AMDGPU_NPS1_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 1;
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 2;
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- if (adev->flags & AMD_IS_APU)
- adev->gmc.num_mem_partitions = 3;
- else
- adev->gmc.num_mem_partitions = 4;
- break;
- default:
- adev->gmc.num_mem_partitions = 1;
- break;
- }
-
- /* Use NPS range info, if populated */
- r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- &adev->gmc.num_mem_partitions);
- if (!r) {
- l = 0;
- for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
- if (mem_ranges[i].range.lpfn >
- mem_ranges[i - 1].range.lpfn)
- l = i;
- }
-
- } else {
- if (!adev->gmc.num_mem_partitions) {
- dev_err(adev->dev,
- "Not able to detect NPS mode, fall back to NPS1");
- adev->gmc.num_mem_partitions = 1;
- }
- /* Fallback to sw based calculation */
- size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
- size /= adev->gmc.num_mem_partitions;
-
- for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
- mem_ranges[i].range.fpfn = start_addr;
- mem_ranges[i].size =
- ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
- mem_ranges[i].range.lpfn = start_addr + size - 1;
- start_addr += size;
- }
-
- l = adev->gmc.num_mem_partitions - 1;
- }
-
- /* Adjust the last one */
- mem_ranges[l].range.lpfn =
- (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
- mem_ranges[l].size =
- adev->gmc.real_vram_size -
- ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
-}
-
-static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
-{
- bool valid;
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
- adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
- sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
- if (!adev->gmc.mem_partitions)
- return -ENOMEM;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
- /* TODO : Get the range from PSP/Discovery for dGPU */
- if (adev->gmc.is_app_apu)
- gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
- else
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
- if (amdgpu_sriov_vf(adev))
- valid = true;
- else
- valid = gmc_v9_0_validate_partition_info(adev);
- if (!valid) {
- /* TODO: handle invalid case */
- dev_WARN(adev->dev,
- "Mem ranges not matching with hardware config");
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
}
-
- return 0;
-}
-
-static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
-{
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
- adev->gmc.vram_width = 128 * 64;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2078,9 +1867,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_is_multi_aid(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2230,10 +2017,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
- r = gmc_v9_0_init_mem_ranges(adev);
+ if (amdgpu_is_multi_aid(adev)) {
+ r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
@@ -2261,9 +2046,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) ?
+ amdgpu_is_multi_aid(adev)) ?
3 :
8;
@@ -2275,9 +2058,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2287,9 +2068,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2363,7 +2142,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
amdgpu_gmc_init_pdb0(adev);
if (adev->gart.bo == NULL) {
@@ -2410,13 +2189,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.flush_tlb_needs_extra_type_2 =
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
adev->gmc.xgmi.num_physical_nodes;
- /*
- * TODO: This workaround is badly documented and had a buggy
- * implementation. We should probably verify what we do here.
- */
- adev->gmc.flush_tlb_needs_extra_type_0 =
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -2434,7 +2206,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -2528,7 +2300,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
* information again.
*/
if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
}
@@ -2541,7 +2313,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v9_0_is_idle(void *handle)
+static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v9.*/
return true;
@@ -2571,9 +2343,9 @@ static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
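
The gmc_v9_0.c hunks above collapse the repeated three-way GC IP-version test (9.4.3 / 9.4.4 / 9.5.0) into a single amdgpu_is_multi_aid() call. The helper's real definition is not part of these hunks, so the following is only a minimal sketch, reconstructed from the open-coded checks it replaces in this diff:

	/* Assumed shape of the helper: true for the multi-AID parts, i.e. exactly
	 * the GC IP versions the removed open-coded checks tested for. */
	static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
	{
		return amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
		       amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
		       amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0);
	}

With that assumption, the first_kfd_vmid hunk keeps its meaning: VMID 3 for GC 9.4.1, 9.4.2 and the multi-AID parts, VMID 8 otherwise.
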
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 194026e9be33..e6c0d86d3486 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v4_0.h"
#include "amdgpu_ras.h"
@@ -37,17 +36,6 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -181,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = {
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
- .flush_hdp = hdp_v4_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index d3962d469088..8bc001dc9f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -21,24 +21,12 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v5_0.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -218,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
- .flush_hdp = hdp_v5_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index f52552c5fa27..40940b4ab400 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v5_2.h"
#include "hdp/hdp_5_2_1_offset.h"
@@ -34,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ if (amdgpu_sriov_vf(adev)) {
+ /* this is fine because SR_IOV doesn't remap the register */
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ }
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
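
The hdp_v4_0/v5_0/v6_0/v7_0 hunks in this diff each delete the same per-IP flush_hdp implementation and point .flush_hdp at amdgpu_hdp_generic_flush. Since the removed bodies are identical, the shared helper presumably keeps the same write-then-read-back pattern; the sketch below is reconstructed from the deleted code, not from the helper's actual definition:

	/* Post the HDP flush: write the remapped flush register, then read a
	 * register back so the write does not sit in a posted-write buffer.
	 * When a ring with emit_wreg is available, emit the write on the ring. */
	void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
				      struct amdgpu_ring *ring)
	{
		if (!ring || !ring->funcs->emit_wreg) {
			WREG32((adev->rmmio_remap.reg_offset +
				KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
			RREG32((adev->rmmio_remap.reg_offset +
				KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
		} else {
			amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset +
				KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
		}
	}

hdp_v5_2.c keeps its own flush routine because, as the new comment in its hunk explains, reading back the remapped register is problematic on some bare-metal platforms, so it posts the write via the NBIO memsize register instead.
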
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 6948fe9956ce..ec20daf4272c 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v6_0.h"
#include "hdp/hdp_6_0_0_offset.h"
@@ -31,17 +30,6 @@
#define regHDP_CLK_CNTL_V6_1 0xd5
#define regHDP_CLK_CNTL_V6_1_BASE_IDX 0
-static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -150,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
- .flush_hdp = hdp_v6_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 63820329f67e..ed1debc03507 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -21,24 +21,12 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v7_0.h"
#include "hdp/hdp_7_0_0_offset.h"
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -138,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
- .flush_hdp = hdp_v7_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 8ac3d3282268..01cadf898c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
@@ -335,9 +345,9 @@ static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
return iceland_ih_hw_init(ip_block);
}
-static bool iceland_ih_is_idle(void *handle)
+static bool iceland_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index f8a485164437..333e9c30c091 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
/* update doorbell range for ih ring 0 */
@@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
@@ -583,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
@@ -652,7 +655,7 @@ static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_0_hw_init(ip_block);
}
-static bool ih_v6_0_is_idle(void *handle)
+static bool ih_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -768,9 +771,9 @@ static int ih_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index dd0042efceec..95b3f4e55ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
@@ -631,7 +630,7 @@ static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_1_hw_init(ip_block);
}
-static bool ih_v6_1_is_idle(void *handle)
+static bool ih_v6_1_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -749,9 +748,9 @@ static int ih_v6_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v6_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 8f9b15c171f3..b32ea4129c61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
@@ -621,7 +620,7 @@ static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v7_0_hw_init(ip_block);
}
-static bool ih_v7_0_is_idle(void *handle)
+static bool ih_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -739,9 +738,9 @@ static int ih_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v7_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index aeca5c08ea2f..cc626036ed9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -32,6 +32,7 @@
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -39,6 +40,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
@@ -50,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index df898dbb746e..58cd87db8061 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -34,12 +34,13 @@
MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin");
#define TRANSFER_RAM_MASK 0x001c0000
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
struct amdgpu_firmware_info *info = NULL;
@@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
@@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
{
u32 reg_data, size = 0;
- const u32 *data;
+ const u32 *data = NULL;
int r = -EINVAL;
WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index 964c29ef25dc..0027a639c7e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -50,26 +50,29 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp platform data alloc failed\n", __func__);
goto failure;
}
@@ -88,14 +91,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
- isp->isp_res[2].name = "isp_gpio_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
- ISP410_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP410_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_0_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -110,11 +106,12 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
- GFP_KERNEL);
+ /* initialize isp i2c platform data */
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
@@ -129,9 +126,31 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev,
+ "%s: isp gpio res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP410_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP410_GPIO_SENSOR_OFFSET +
+ ISP410_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: add mfd hotplug device failed\n", __func__);
goto failure;
}
@@ -143,6 +162,7 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
@@ -155,6 +175,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 7db24c0f1080..4d239198edd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -42,8 +42,8 @@
#define ISP410_I2C0_OFFSET 0x66400
#define ISP410_I2C0_SIZE 0x100
-#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP410_GPIO_SENSOR0_SIZE 0x4
+#define ISP410_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index b56f27295468..4258d3e0b706 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -25,9 +25,18 @@
*
*/
+#include <linux/gpio/machine.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+MODULE_FIRMWARE("amdgpu/isp_4_1_1.bin");
+
+#define ISP_PERFORMANCE_STATE_LOW 0
+#define ISP_PERFORMANCE_STATE_HIGH 1
+
+#define ISP_HIGH_PERFORMANC_XCLK 788
+#define ISP_HIGH_PERFORMANC_ICLK 788
+
static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
@@ -39,41 +48,205 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
};
+static struct gpiod_lookup_table isp_gpio_table = {
+ .dev_id = "amd_isp_capture",
+ .table = {
+ GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table isp_sensor_gpio_table = {
+ .dev_id = "i2c-ov05c10",
+ .table = {
+ GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static int isp_poweroff(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0);
+}
+
+static int isp_poweron(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0);
+}
+
+static int isp_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ u32 iclk, xclk;
+ int ret;
+
+ switch (state) {
+ case ISP_PERFORMANCE_STATE_HIGH:
+ xclk = ISP_HIGH_PERFORMANC_XCLK;
+ iclk = ISP_HIGH_PERFORMANC_ICLK;
+ break;
+ case ISP_PERFORMANCE_STATE_LOW:
+ /* isp runs at default lowest clock-rate on power-on, do nothing */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n",
+ xclk, state, ret);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n",
+ iclk, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int isp_genpd_add_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to add\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n",
+ pdev->mfd_cell->name, ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to add */
+ return 0;
+}
+
+static int isp_genpd_remove_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to remove\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n",
+ pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to remove */
+ return 0;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
+ const struct software_node *amd_camera_node, *isp4_node;
struct amdgpu_device *adev = isp->adev;
+ struct acpi_device *acpi_dev;
int idx, int_idx, num_res, r;
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
+ r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
+ if (r) {
+ drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
+ /* allow GPU init to progress */
+ return 0;
+ }
+
+ /* add GPIO resources required for OMNI5C10 sensor */
+ if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
+ gpiod_add_lookup_table(&isp_gpio_table);
+ gpiod_add_lookup_table(&isp_sensor_gpio_table);
+ }
+
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->ispgpd.name = "ISP_v_4_1_1";
+ isp->ispgpd.power_off = isp_poweroff;
+ isp->ispgpd.power_on = isp_poweron;
+ isp->ispgpd.set_performance_state = isp_set_performance_state;
+
+ r = pm_genpd_init(&isp->ispgpd, NULL, true);
+ if (r) {
+ drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r);
+ return -EINVAL;
+ }
+
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
goto failure;
}
- num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
goto failure;
}
+ amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+ isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
/* initialize isp platform data */
isp->isp_pdata->adev = (void *)adev;
isp->isp_pdata->asic_type = adev->asic_type;
@@ -89,14 +262,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
- isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
- ISP411_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP411_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_1_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -109,12 +275,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].swnode = isp4_node;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
+ /* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
goto failure;
}
@@ -129,9 +297,43 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP411_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP411_GPIO_SENSOR_OFFSET +
+ ISP411_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */
r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r);
+ goto failure;
+ }
+
+ r = device_for_each_child(isp->parent, &isp->ispgpd,
+ isp_genpd_add_device);
+ if (r) {
+ drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r);
+ goto failure;
+ }
+
+ r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1);
+ if (r) {
+ drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r);
goto failure;
}
@@ -143,18 +345,23 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
{
+ device_for_each_child(isp->parent, NULL,
+ isp_genpd_remove_device);
+
mfd_remove_devices(isp->parent);
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index 40887ddeb08c..fe45d70d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -33,7 +33,6 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
#define MAX_ISP411_MEM_RES 2
-#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
#define ISP411_PHY0_OFFSET 0x66700
@@ -42,8 +41,8 @@
#define ISP411_I2C0_OFFSET 0x66400
#define ISP411_I2C0_SIZE 0x100
-#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP411_GPIO_SENSOR0_SIZE 0x4
+#define ISP411_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 03b8b7cd5229..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
@@ -604,15 +604,15 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
int cnt = 0;
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
- for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+ for (cnt = 0; cnt < adev->vcn.inst[0].num_enc_rings; cnt++) {
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 7c9251c03815..27c76bd424cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -33,6 +32,22 @@
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_2_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -98,7 +113,17 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_2_0, ARRAY_SIZE(jpeg_reg_list_2_0));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -117,6 +142,8 @@ static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -655,9 +682,9 @@ void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v2_0_is_idle(void *handle)
+static bool jpeg_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
@@ -682,7 +709,7 @@ static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(adev))
+ if (!jpeg_v2_0_is_idle(ip_block))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
@@ -739,6 +766,22 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v2_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v2_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.name = "jpeg_v2_0",
.early_init = jpeg_v2_0_early_init,
@@ -752,6 +795,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.wait_for_idle = jpeg_v2_0_wait_for_idle,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
@@ -760,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -782,6 +827,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_0_ring_reset,
};
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -807,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
-
-/**
- * jpeg_v2_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- u32 i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
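
The JPEG hunks in this diff (v2.0 above, and v2.5, v3.0, v4.0 below) wire up per-queue reset in the same way: advertise the capability at sw_init time and give the ring a .reset callback bracketed by the ring-reset helpers. A condensed sketch of that recurring pattern, using only names visible in the diff (jpeg_vX_ring_reset is a placeholder, and the helpers' internals are not shown here), is:

	/* sw_init: advertise soft/full reset, plus per-queue reset on bare metal */
	adev->jpeg.supported_reset =
		amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
	if (!amdgpu_sriov_vf(adev))
		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

	/* ring->funcs->reset: the concrete callbacks stop and restart the JPEG
	 * block (or a single instance) between the two helpers. */
	static int jpeg_vX_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
				      struct amdgpu_fence *timedout_fence)
	{
		amdgpu_ring_reset_helper_begin(ring, timedout_fence);
		/* per-version stop/start of the JPEG instance owning this ring */
		return amdgpu_ring_reset_helper_end(ring, timedout_fence);
	}

The matching sw_fini hunks call amdgpu_jpeg_sysfs_reset_mask_fini() before tearing the block down.
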
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 63fadda7a673..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 11f6af2646e7..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -36,6 +36,22 @@
#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_2_5[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -147,7 +163,17 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_2_5, ARRAY_SIZE(jpeg_reg_list_2_5));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -166,6 +192,8 @@ static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -310,6 +338,44 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
}
+static void jpeg_v2_5_start_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst[i].ring_dec;
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v2_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+}
+
/**
* jpeg_v2_5_start - start JPEG block
*
@@ -319,52 +385,33 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
*/
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
+ jpeg_v2_5_start_inst(adev, i);
- ring = adev->jpeg.inst[i].ring_dec;
- /* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- /* JPEG disable CGC */
- jpeg_v2_5_disable_clock_gating(adev, i);
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
}
return 0;
}
+static void jpeg_v2_5_stop_inst(struct amdgpu_device *adev, int i)
+{
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v2_5_enable_clock_gating(adev, i);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
+
/**
* jpeg_v2_5_stop - stop JPEG block
*
@@ -379,18 +426,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
-
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- jpeg_v2_5_enable_clock_gating(adev, i);
-
- /* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
- UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ jpeg_v2_5_stop_inst(adev, i);
}
return 0;
@@ -482,9 +518,9 @@ static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
}
-static bool jpeg_v2_5_is_idle(void *handle)
+static bool jpeg_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -530,7 +566,7 @@ static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(adev))
+ if (!jpeg_v2_5_is_idle(ip_block))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
@@ -610,6 +646,16 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ jpeg_v2_5_stop_inst(ring->adev, ring->me);
+ jpeg_v2_5_start_inst(ring->adev, ring->me);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.name = "jpeg_v2_5",
.early_init = jpeg_v2_5_early_init,
@@ -623,6 +669,8 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.wait_for_idle = jpeg_v2_5_wait_for_idle,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
@@ -638,6 +686,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.wait_for_idle = jpeg_v2_5_wait_for_idle,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
@@ -646,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -668,6 +718,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_5_ring_reset,
};
static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
@@ -676,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -698,6 +749,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_5_ring_reset,
};
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 4eca65ea9053..d1a011c40ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -34,6 +34,22 @@
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_3_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -112,7 +128,17 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_3_0, ARRAY_SIZE(jpeg_reg_list_3_0));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -131,6 +157,8 @@ static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -445,9 +473,9 @@ static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v3_0_is_idle(void *handle)
+static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
@@ -473,7 +501,7 @@ static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(adev))
+ if (!jpeg_v3_0_is_idle(ip_block))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
@@ -530,6 +558,22 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v3_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v3_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.name = "jpeg_v3_0",
.early_init = jpeg_v3_0_early_init,
@@ -543,6 +587,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.wait_for_idle = jpeg_v3_0_wait_for_idle,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
@@ -551,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -573,6 +619,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v3_0_ring_reset,
};
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 0aef1f64afd0..33db2c1ae6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -36,13 +36,28 @@
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
-
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -123,14 +138,18 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->jpeg.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0, ARRAY_SIZE(jpeg_reg_list_4_0));
if (r)
return r;
- return 0;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -614,9 +633,9 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_is_idle(void *handle)
+static bool jpeg_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -642,7 +661,7 @@ static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(adev))
+ if (!jpeg_v4_0_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
@@ -704,6 +723,22 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.name = "jpeg_v4_0",
.early_init = jpeg_v4_0_early_init,
@@ -717,6 +752,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.wait_for_idle = jpeg_v4_0_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
@@ -725,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -747,6 +784,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_ring_reset,
};
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 88f9771c1686..aae7328973d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -59,10 +59,53 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_4_0__SRCID__JPEG7_DECODE
};
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0_3[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_SYS_INT_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
+};
+
static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
- return amdgpu_sriov_vf(adev) ||
- (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4));
+ return (adev->jpeg.caps & AMDGPU_JPEG_CAPS(RRMT_ENABLED)) == 0;
+}
+
+static inline int jpeg_v4_0_3_core_reg_offset(u32 pipe)
+{
+ if (pipe)
+ return ((0x40 * pipe) - 0xc80);
+ else
+ return 0;
}
/**
@@ -106,6 +149,18 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@@ -144,10 +199,8 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[j] =
regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
adev->jpeg.inst[i].external.jpeg_pitch[j] =
- SOC15_REG_OFFSET1(
- JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_SCRATCH0,
- (j ? (0x40 * j - 0xc80) : 0));
+ SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_SCRATCH0,
+ jpeg_v4_0_3_core_reg_offset(j));
}
}
@@ -159,13 +212,16 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->jpeg.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0_3, ARRAY_SIZE(jpeg_reg_list_4_0_3));
if (r)
return r;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
return 0;
}
@@ -186,6 +242,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -331,6 +388,11 @@ static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
}
}
} else {
+ /* This flag is not set for VF, so RRMT is assumed to always be disabled there */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
+ 0x100)
+ adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
jpeg_inst = GET_INST(JPEG, i);
@@ -382,6 +444,9 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+
return ret;
}
@@ -475,6 +540,75 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}
+static void jpeg_v4_0_3_start_inst(struct amdgpu_device *adev, int inst)
+{
+ int jpeg_inst = GET_INST(JPEG, inst);
+
+ WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
+ 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
+ UVD_PGFSM_STATUS__UVDJ_PWR_ON <<
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
+
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v4_0_3_disable_clock_gating(adev, inst);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+}
+
+static void jpeg_v4_0_3_start_jrbc(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = jpeg_v4_0_3_core_reg_offset(ring->pipe);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe,
+ ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe));
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset);
+}
+
/**
* jpeg_v4_0_3_start - start JPEG block
*
@@ -485,84 +619,36 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst
static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, jpeg_inst;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
-
- WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
- 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(
- JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
- UVD_PGFSM_STATUS__UVDJ_PWR_ON
- << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
-
- /* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regUVD_JPEG_POWER_STATUS),
- 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- /* JPEG disable CGC */
- jpeg_v4_0_3_disable_clock_gating(adev, i);
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
+ jpeg_v4_0_3_start_inst(adev, i);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
ring = &adev->jpeg.inst[i].ring_dec[j];
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC0_MASK << j,
- ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));
-
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_CNTL,
- reg_offset,
- (0x00000001L | 0x00000002L));
- WREG32_SOC15_OFFSET(
- JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- reg_offset, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(
- JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- reg_offset, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_RPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_CNTL,
- reg_offset, 0x00000002L);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_SIZE,
- reg_offset, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15_OFFSET(
- JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- reg_offset);
+ jpeg_v4_0_3_start_jrbc(ring);
}
}
return 0;
}
+static void jpeg_v4_0_3_stop_inst(struct amdgpu_device *adev, int inst)
+{
+ int jpeg_inst = GET_INST(JPEG, inst);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v4_0_3_enable_clock_gating(adev, inst);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+}
+
/**
* jpeg_v4_0_3_stop - stop JPEG block
*
@@ -572,31 +658,10 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
{
- int i, jpeg_inst;
+ int i;
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- jpeg_v4_0_3_enable_clock_gating(adev, i);
-
- /* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regUVD_JPEG_POWER_STATUS),
- UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
- 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(
- JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
- UVD_PGFSM_STATUS__UVDJ_PWR_OFF
- << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ jpeg_v4_0_3_stop_inst(adev, i);
return 0;
}
@@ -612,9 +677,8 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
- ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe));
}
/**
@@ -630,14 +694,12 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
- else
- return RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, ring->me),
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe));
}
-static void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* JPEG engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through JPEG ring.
@@ -659,10 +721,8 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
- 0),
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe),
lower_32_bits(ring->wptr));
}
}
@@ -907,21 +967,17 @@ void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v4_0_3_is_idle(void *handle)
+static bool jpeg_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
- ret &= ((RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, i),
- regUVD_JRBC0_UVD_JRBC_STATUS,
- reg_offset) &
- UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS, jpeg_v4_0_3_core_reg_offset(j)) &
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
}
@@ -937,13 +993,10 @@ static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
- ret &= SOC15_WAIT_ON_RREG_OFFSET(
- JPEG, GET_INST(JPEG, i),
- regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
+ ret &= (SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS, jpeg_v4_0_3_core_reg_offset(j),
UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
- UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
}
}
return ret;
@@ -958,7 +1011,7 @@ static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(adev))
+ if (!jpeg_v4_0_3_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -1001,6 +1054,14 @@ static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1055,6 +1116,44 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = jpeg_v4_0_3_core_reg_offset(ring->pipe);
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x1F);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x00);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
+}
+
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ jpeg_v4_0_3_core_stall_reset(ring);
+ jpeg_v4_0_3_start_jrbc(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.name = "jpeg_v4_0_3",
.early_init = jpeg_v4_0_3_early_init,
@@ -1068,6 +1167,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
@@ -1076,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1099,6 +1200,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_3_ring_reset,
};
static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1122,6 +1224,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
.process = jpeg_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = {
+ .set = jpeg_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1130,6 +1237,9 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
}
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
@@ -1226,9 +1336,47 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V4_0_3_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V4_0_3_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
+ .query_poison_status = jpeg_v4_0_3_query_ras_poison_status,
};
static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -1245,11 +1393,13 @@ static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_ban
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
@@ -1303,6 +1453,13 @@ static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
&jpeg_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..2e110d04af84 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,6 +46,13 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+enum amdgpu_jpeg_v4_0_3_sub_block {
+ AMDGPU_JPEG_V4_0_3_JPEG0 = 0,
+ AMDGPU_JPEG_V4_0_3_JPEG1,
+
+ AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -56,6 +63,7 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
unsigned int flags);
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6b3656984957..54fd9c800c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -46,11 +46,26 @@
#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160
#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0_5[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
-
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_clientid_jpeg[] = {
@@ -58,6 +73,8 @@ static int amdgpu_ih_clientid_jpeg[] = {
SOC15_IH_CLIENTID_VCN1
};
+
+
/**
* jpeg_v4_0_5_early_init - set function pointers
*
@@ -153,9 +170,14 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
}
- /* TODO: Add queue reset mask when FW fully supports it */
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0_5, ARRAY_SIZE(jpeg_reg_list_4_0_5));
+ if (r)
+ return r;
+
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -627,9 +649,9 @@ static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_5_is_idle(void *handle)
+static bool jpeg_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -664,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -672,7 +694,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(adev))
+ if (!jpeg_v4_0_5_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
@@ -746,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_5_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_5_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
@@ -759,6 +797,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
@@ -767,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -789,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_5_ring_reset,
};
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d5cf0f2799d4..46bf15dce2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -34,6 +34,22 @@
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "jpeg_v5_0_0.h"
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -100,13 +116,17 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
- /* TODO: Add queue reset mask when FW fully supports it */
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0, ARRAY_SIZE(jpeg_reg_list_5_0));
+ if (r)
+ return r;
+
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- return 0;
+
+ return r;
}
/**
@@ -539,9 +559,9 @@ static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_0_is_idle(void *handle)
+static bool jpeg_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -564,10 +584,10 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v5_0_0_is_idle(adev))
+ if (!jpeg_v5_0_0_is_idle(ip_block))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
@@ -624,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v5_0_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v5_0_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
@@ -637,6 +673,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
@@ -645,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -667,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_0_ring_reset,
};
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 40d4c32a8c2a..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -28,15 +28,18 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_srcid_jpeg[] = {
@@ -52,6 +55,47 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_5_0__SRCID__JPEG9_DECODE,
};
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_STATUS),
+};
+
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
@@ -77,6 +121,7 @@ static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
jpeg_v5_0_1_set_dec_ring_funcs(adev);
jpeg_v5_0_1_set_irq_funcs(adev);
+ jpeg_v5_0_1_set_ras_funcs(adev);
return 0;
}
@@ -101,6 +146,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
@@ -115,21 +171,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
if (!amdgpu_sriov_vf(adev)) {
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
- if (j < 4)
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 4 + j + 32 * jpeg_inst;
- else
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 8 + j + 32 * jpeg_inst;
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -145,7 +196,25 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- return 0;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -164,6 +233,8 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -182,7 +253,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
- /* jpeg_v5_0_1_start_sriov(adev); */
+ r = jpeg_v5_0_1_start_sriov(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -194,6 +268,9 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
}
return 0;
}
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
jpeg_inst = GET_INST(JPEG, i);
ring = adev->jpeg.inst[i].ring_dec;
@@ -206,7 +283,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->jpeg.inst[i].ring_dec[j];
if (ring->use_doorbell)
WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
- (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->pipe,
ring->doorbell_index <<
VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
@@ -233,8 +310,13 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
}
@@ -281,11 +363,10 @@ static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
@@ -294,19 +375,187 @@ static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
- return 0;
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}
-static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
+
+static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 reg, data, mask;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
+ WREG32_P(reg, data, mask);
+ } else {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12);
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12));
+ WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
+}
+
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j < 5) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 5].table_offset = item_offset;
+ header.mjpegdec1[j - 5].init_status = 0;
+ header.mjpegdec1[j - 5].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V5_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+
+ }
return 0;
}
@@ -320,69 +569,13 @@ static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, jpeg_inst, r;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
-
- /* disable antihang */
- r = jpeg_v5_0_1_disable_antihang(adev, i);
- if (r)
- return r;
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
+ jpeg_v5_0_1_init_inst(adev, i);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
- u32 reg, data, mask;
-
ring = &adev->jpeg.inst[i].ring_dec[j];
-
- /* enable System Interrupt for JRBC */
- reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
- if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
- WREG32_P(reg, data, mask);
- } else {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12);
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12));
- WREG32_P(reg, data, mask);
- }
-
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_VMID,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset,
- (0x00000001L | 0x00000002L));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- reg_offset, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- reg_offset, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_RPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_WPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset, 0x00000002L);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_SIZE,
- reg_offset, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
- reg_offset);
+ jpeg_v5_0_1_init_jrbc(ring);
}
}
@@ -398,20 +591,10 @@ static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
*/
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
- int i, jpeg_inst, r;
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ int i;
- /* enable antihang */
- r = jpeg_v5_0_1_enable_antihang(adev, i);
- if (r)
- return r;
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ jpeg_v5_0_1_deinit_inst(adev, i);
return 0;
}
@@ -471,9 +654,9 @@ static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_1_is_idle(void *handle)
+static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
@@ -514,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -522,7 +705,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (!jpeg_v5_0_1_is_idle(adev))
+ if (!jpeg_v5_0_1_is_idle(ip_block))
return -EBUSY;
}
@@ -535,6 +718,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
@@ -557,6 +745,16 @@ static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+
+
static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -617,6 +815,41 @@ static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x1F);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x00);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
+}
+
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ jpeg_v5_0_1_core_stall_reset(ring);
+ jpeg_v5_0_1_init_jrbc(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
.name = "jpeg_v5_0_1",
.early_init = jpeg_v5_0_1_early_init,
@@ -635,8 +868,8 @@ static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
@@ -645,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -655,6 +889,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
@@ -666,6 +901,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_1_ring_reset,
};
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -689,6 +925,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
.process = jpeg_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
+ .set = jpeg_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -697,6 +938,10 @@ static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
+
}
const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
@@ -706,3 +951,151 @@ const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
.rev = 1,
.funcs = &jpeg_v5_0_1_ip_funcs,
};
+
+static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V5_0_1_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V5_0_1_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
+ .query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
+};
+
+static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* error codes are referenced from the SMU driver interface header file */
+static int jpeg_v5_0_1_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
+};
+
+static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v5_0_1_err_codes,
+ ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v5_0_1_aca_bank_ops,
+};
+
+static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v5_0_1_ras_hw_ops,
+ .ras_late_init = jpeg_v5_0_1_ras_late_init,
+ },
+};
+
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.ras = &jpeg_v5_0_1_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
index 8ce146c00bb6..a7e58d5fb246 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -26,4 +26,86 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
-#endif /* __JPEG_V5_0_0_H__ */
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
+
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
+#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
+#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
+#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
+#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109
+#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149
+#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189
+#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR 0x01c0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_STATUS 0x01c9
+#define regUVD_JRBC8_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR 0x01ca
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR 0x0440
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_STATUS 0x0449
+#define regUVD_JRBC9_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR 0x044a
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JMI0_JPEG_LMI_DROP 0x0663
+#define regUVD_JMI0_JPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL 0x067a
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS 0x067b
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 1
+#define regJPEG_CORE_RST_CTRL 0x072e
+#define regJPEG_CORE_RST_CTRL_BASE_IDX 1
+
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+enum amdgpu_jpeg_v5_0_1_sub_block {
+ AMDGPU_JPEG_V5_0_1_JPEG0 = 0,
+ AMDGPU_JPEG_V5_0_1_JPEG1,
+
+ AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK,
+};
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644
index 000000000000..64cae89357b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drm_drv.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_bo_unreserve(bo);
+ bo = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+ return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ uint64_t wptr)
+{
+ struct amdgpu_bo_va_mapping *wptr_mapping;
+ struct amdgpu_vm *wptr_vm;
+ struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ int ret;
+
+ wptr_vm = queue->vm;
+ ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+ if (ret)
+ return ret;
+
+ wptr &= AMDGPU_GMC_HOLE_MASK;
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+ amdgpu_bo_unreserve(wptr_vm->root.bo);
+ if (!wptr_mapping) {
+ DRM_ERROR("Failed to lookup wptr bo\n");
+ return -EINVAL;
+ }
+
+ wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+ return -EINVAL;
+ }
+
+ ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+ if (ret) {
+ DRM_ERROR("Failed to map wptr bo to GART\n");
+ return ret;
+ }
+
+ queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+ return 0;
+}
+
+static int convert_to_mes_priority(int priority)
+{
+ switch (priority) {
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
+ default:
+ return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
+ return AMDGPU_MES_PRIORITY_LEVEL_LOW;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
+ }
+}
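
A queue's priority is translated twice before it reaches the firmware: the UAPI create flags are folded into the driver's AMDGPU_MES_PRIORITY_LEVEL_* values here, and the MES v11/v12 add_hw_queue paths further below convert those into the firmware's AMD_PRIORITY_LEVEL_* enum. A minimal sketch of the combined chain (the wrapper name is illustrative only):

    /* Illustrative composition of the two mapping stages; both helpers are
     * static in their respective files, so this wrapper is hypothetical. */
    static int userq_flags_to_fw_priority(int create_flags)
    {
            int mes_level = convert_to_mes_priority(create_flags);

            /* value carried by mes_add_queue_pkt.gang_global_priority_level */
            return convert_to_mes_priority_level(mes_level);
    }
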
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
+ struct mes_add_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+ queue_input.process_va_start = 0;
+ queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
+
+ /* set process quantum to 10 ms and gang quantum to 1 ms as default (values in 100 ns units) */
+ queue_input.process_quantum = 100000;
+ queue_input.gang_quantum = 10000;
+ queue_input.paging = false;
+
+ queue_input.process_context_addr = ctx->gpu_addr;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
+
+ queue_input.process_id = queue->vm->pasid;
+ queue_input.queue_type = queue->queue_type;
+ queue_input.mqd_addr = queue->mqd.gpu_addr;
+ queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+ queue_input.queue_size = userq_props->queue_size >> 2;
+ queue_input.doorbell_offset = userq_props->doorbell_index;
+ queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+ queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+ return r;
+ }
+
+ DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+ return 0;
+}
+
+static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_remove_queue_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+ queue_input.doorbell_offset = queue->doorbell_index;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+ return r;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *mqd_user)
+{
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r, size;
+
+ /*
+ * The FW expects at least one page space allocated for
+ * process ctx and gang ctx each. Create an object
+ * for the same.
+ */
+ size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+ r = amdgpu_userq_create_object(uq_mgr, ctx, size);
+ if (r) {
+ DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+ return r;
+ }
+
+ return 0;
+}
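
The context BO allocated above is carved into two fixed page-sized regions; the rest of this file (map, unmap, preempt, restore) derives the gang context address from the same offset. A small sketch of the assumed layout:

    /* Layout of queue->fw_obj (one page per region, per the *_CTX_SZ macros):
     *
     *   ctx->gpu_addr + 0                         process context
     *   ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ  gang context
     *
     * Illustrative helper only; the driver open-codes the addition instead.
     */
    static inline u64 userq_gang_ctx_addr(struct amdgpu_userq_obj *ctx)
    {
            return ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
    }
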
+
+static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
+ int queue_type)
+{
+ int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
+ struct mes_detect_and_reset_queue_input input;
+ struct amdgpu_usermode_queue *queue;
+ unsigned int hung_db_num = 0;
+ unsigned long queue_id;
+ u32 db_array[8];
+ bool found_hung_queue = false;
+ int r, i;
+
+ if (db_array_size > 8) {
+ dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
+ db_array_size);
+ return -EINVAL;
+ }
+
+ memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));
+
+ input.queue_type = queue_type;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
+ &hung_db_num, db_array);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
+ } else if (hung_db_num) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+ }
+ }
+ }
+ }
+ }
+
+ if (found_hung_queue) {
+ /* Resume scheduling after hang recovery */
+ r = amdgpu_mes_resume(adev);
+ }
+
+ return r;
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args_in,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+ struct drm_amdgpu_userq_in *mqd_user = args_in;
+ struct amdgpu_mqd_prop *userq_props;
+ int r;
+
+ /* Structure to initialize MQD for userqueue using generic MQD init function */
+ userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+ if (!userq_props) {
+ DRM_ERROR("Failed to allocate memory for userq_props\n");
+ return -ENOMEM;
+ }
+
+ r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+ if (r) {
+ DRM_ERROR("Failed to create MQD object for userqueue\n");
+ goto free_props;
+ }
+
+ /* Initialize the MQD BO with user given values */
+ userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+ userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+ userq_props->queue_size = mqd_user->queue_size;
+ userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+ userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+ userq_props->use_doorbell = true;
+ userq_props->doorbell_index = queue->doorbell_index;
+ userq_props->fence_address = queue->fence_drv->gpu_addr;
+
+ if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+ struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+ if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+ DRM_ERROR("Invalid compute IP MQD size\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(compute_mqd)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ 2048);
+ if (r)
+ goto free_mqd;
+
+ userq_props->eop_gpu_addr = compute_mqd->eop_va;
+ userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+ userq_props->hqd_active = false;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(compute_mqd);
+ } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+ struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid GFX MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_gfx_v11)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+ userq_props->csa_addr = mqd_gfx_v11->csa_va;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
+ goto free_mqd;
+
+ kfree(mqd_gfx_v11);
+ } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+ struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid SDMA MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_sdma_v11)) {
+ DRM_ERROR("Failed to read sdma user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+ r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ 32);
+ if (r)
+ goto free_mqd;
+
+ userq_props->csa_addr = mqd_sdma_v11->csa_va;
+ kfree(mqd_sdma_v11);
+ }
+
+ queue->userq_prop = userq_props;
+
+ r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+ if (r) {
+ DRM_ERROR("Failed to initialize MQD for userqueue\n");
+ goto free_mqd;
+ }
+
+ /* Create BO for FW operations */
+ r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+ if (r) {
+ DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ goto free_mqd;
+ }
+
+ /* FW expects WPTR BOs to be mapped into GART */
+ r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to create WPTR mapping\n");
+ goto free_ctx;
+ }
+
+ return 0;
+
+free_ctx:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+ kfree(userq_props);
+
+ return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+ kfree(queue->userq_prop);
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+}
+
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_suspend_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ signed long timeout = 2100000; /* 2100 ms */
+ u64 fence_gpu_addr;
+ u32 fence_offset;
+ u64 *fence_ptr;
+ int i, r;
+
+ if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+ return 0;
+ r = amdgpu_device_wb_get(adev, &fence_offset);
+ if (r)
+ return r;
+
+ fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.suspend_fence_addr = fence_gpu_addr;
+ queue_input.suspend_fence_value = 1;
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to suspend gang: %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < timeout; i++) {
+ if (*fence_ptr == 1)
+ goto out;
+ udelay(1);
+ }
+ r = -ETIMEDOUT;
+
+out:
+ amdgpu_device_wb_free(adev, fence_offset);
+ return r;
+}
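
The suspend path above is a write-back fence handshake: the driver clears a WB dword, hands its GPU address and the expected value (1) to MES in the suspend packet, then busy-waits on the CPU mapping until the firmware signals completion or the timeout expires. The polling step, isolated as a hypothetical helper for clarity:

    /* Hypothetical helper mirroring the polling loop above; not driver API. */
    static int userq_poll_wb_fence(volatile u64 *fence_ptr, u64 expected,
                                   unsigned long timeout_us)
    {
            unsigned long i;

            for (i = 0; i < timeout_us; i++) {
                    if (READ_ONCE(*fence_ptr) == expected)
                            return 0;
                    udelay(1);
            }
            return -ETIMEDOUT;
    }
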
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_resume_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+ return -EINVAL;
+ if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+ return 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+ return r;
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+ .mqd_create = mes_userq_mqd_create,
+ .mqd_destroy = mes_userq_mqd_destroy,
+ .unmap = mes_userq_unmap,
+ .map = mes_userq_map,
+ .detect_and_reset = mes_userq_detect_and_reset,
+ .preempt = mes_userq_preempt,
+ .restore = mes_userq_restore,
+};
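
How this ops table is consumed is outside this hunk; the assumption is that an IP block which supports MES-managed user queues points its per-IP slot at it during init, along the lines of:

    /* Assumed wiring, shown for illustration only. */
    adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
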
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
new file mode 100644
index 000000000000..090ae8897770
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
+
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 65f389eb65e5..3a52754b5cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -54,6 +54,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes_2.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes1.bin");
static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
@@ -62,6 +64,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
#define GFX_MES_DRAM_SIZE 0x80000
+#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
+
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES11_HUNG_HQD_INFO_OFFSET 4
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
@@ -284,6 +290,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -307,9 +330,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -346,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -356,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x60)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
@@ -455,31 +482,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- if (input->use_mmio)
- return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
- input->me_id, input->pipe_id,
- input->queue_id, input->vmid);
-
- union MESAPI__RESET mes_reset_queue_pkt;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- return mes_v11_0_submit_pkt_and_poll_completion(mes,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -646,8 +648,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
- return -EINVAL;
+ dev_warn_once(mes->adev->dev,
+ "MES FW version must be larger than 0x63 to support limit single process feature.\n");
+ return 0;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
@@ -691,7 +694,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
@@ -714,13 +718,19 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(mes->adev->dev,
+ "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
mes->event_log_gpu_addr;
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -730,9 +740,6 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
- int size = 128 * PAGE_SIZE;
- int ret = 0;
- struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
@@ -741,25 +748,20 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_pkt.enable_mes_info_ctx = 1;
- ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &mes->resource_1,
- &mes->resource_1_gpu_addr,
- &mes->resource_1_addr);
- if (ret) {
- dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
- return ret;
+ mes_set_hw_res_pkt.cleaner_shader_fence_mc_addr = mes->resource_1_gpu_addr[0];
+ if (amdgpu_sriov_is_mes_info_enable(mes->adev)) {
+ mes_set_hw_res_pkt.mes_info_ctx_mc_addr =
+ mes->resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE;
+ mes_set_hw_res_pkt.mes_info_ctx_size = MES11_HW_RESOURCE_1_SIZE;
}
- mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
- mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
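
With the BO creation moved out of this function (see the mes_v11_0_sw_init() change below), resource_1 for MES 11 is assumed to be laid out as one GPU page of cleaner-shader fence followed, under SR-IOV with MES info enabled, by the MES info context:

    /* Sketch of the assumed resource_1[0] layout consumed above:
     *
     *   +0                       cleaner shader fence (one GPU page)
     *   +AMDGPU_GPU_PAGE_SIZE    MES info ctx, MES11_HW_RESOURCE_1_SIZE bytes
     *                            (only present when SR-IOV MES info is enabled)
     */
    u64 fence_addr    = mes->resource_1_gpu_addr[0];
    u64 info_ctx_addr = mes->resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE;
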
-static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
@@ -777,7 +779,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -795,6 +797,32 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -803,12 +831,12 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
- .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
+ .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -843,7 +871,7 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -884,7 +912,7 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -899,6 +927,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
int pipe;
+ /* return early if we have already fetched these */
+ if (adev->mes.sched_version && adev->mes.kiq_version)
+ return;
+
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
@@ -982,7 +1014,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1054,7 +1086,7 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1265,7 +1297,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1348,7 +1380,7 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v11_compute_mqd);
struct amdgpu_ring *ring;
@@ -1389,7 +1421,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int pipe, r;
+ int pipe, r, bo_size;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
@@ -1424,6 +1456,23 @@ static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ bo_size = AMDGPU_GPU_PAGE_SIZE;
+ if (amdgpu_sriov_is_mes_info_enable(adev))
+ bo_size += MES11_HW_RESOURCE_1_SIZE;
+
+ /* Only needed for AMDGPU_MES_SCHED_PIPE on MES 11 */
+ r = amdgpu_bo_create_kernel(adev,
+ bo_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mes.resource_1[0],
+ &adev->mes.resource_1_gpu_addr[0],
+ &adev->mes.resource_1_addr[0]);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", r);
+ return r;
+ }
+
return 0;
}
@@ -1432,6 +1481,9 @@ static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe;
+ amdgpu_bo_free_kernel(&adev->mes.resource_1[0], &adev->mes.resource_1_gpu_addr[0],
+ &adev->mes.resource_1_addr[0]);
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1619,7 +1671,7 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
r = mes_v11_0_set_hw_resources_1(&adev->mes);
if (r) {
DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
@@ -1633,6 +1685,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
@@ -1651,34 +1707,17 @@ failure:
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_sriov_is_mes_info_enable(adev)) {
- amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
- &adev->mes.resource_1_addr);
- }
return 0;
}
static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v11_0_hw_fini(ip_block);
}
static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v11_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v11_0_hw_init(ip_block);
}
static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
@@ -1686,6 +1725,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
@@ -1697,22 +1739,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
- .late_init = mes_v11_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 5b537806b4da..744e95d3984a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,6 +47,9 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES12_HUNG_HQD_INFO_OFFSET 4
+
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -108,6 +111,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -225,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
pipe, x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
- if (r < 1 || !*status_ptr) {
+
+ /*
+ * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success).
+ * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
+ */
+ if (r < 1 || !(lower_32_bits(*status_ptr))) {
if (misc_op_str)
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
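
Decoding the 64-bit API status word described in the new comment, as a hedged illustration using the kernel's lower_32_bits()/upper_32_bits() helpers: the low half acts as the completion flag and, on failure, the high half carries debug data.

    /* Illustration only; the driver's own error path follows below. */
    u64 status = READ_ONCE(*status_ptr);

    if (!lower_32_bits(status))
            dev_err(adev->dev, "MES API failed, debug info 0x%08x\n",
                    upper_32_bits(status));
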
@@ -274,6 +283,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -297,9 +323,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -335,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -345,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x5a)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
@@ -477,32 +507,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- union MESAPI__RESET mes_reset_queue_pkt;
- int pipe;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
- return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -576,13 +580,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
@@ -686,6 +718,8 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
+ mes_set_hw_res_1_pkt.cleaner_shader_fence_mc_addr =
+ mes->resource_1_gpu_addr[pipe];
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
@@ -745,6 +779,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(adev->dev,
+ "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -756,10 +795,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr +
+ pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -842,8 +882,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
-static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
@@ -862,7 +902,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -875,7 +915,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
}
- if (mes->adev->enable_uni_mes)
+ if (input->is_kq)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;
@@ -885,6 +925,74 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+ * MES doesn't support invalidating the gc_hub on a slave XCC individually;
+ * the master XCC invalidates all gc_hubs for the partition
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+ int ret;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+ /* convert amdgpu_mes_hub_id to the hub_id expected by MES */
+ ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (ret < 0)
+ return -EINVAL;
+ mes_inv_tlbs.invalidate_tlbs.hub_id = ret;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -893,12 +1001,13 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
- .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
+ .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -932,7 +1041,7 @@ static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -966,7 +1075,7 @@ static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -983,29 +1092,50 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;
if (enable) {
- data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (amdgpu_mes_log_enable) {
+ u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
+ /* In case uni mes is not enabled, only program for pipe 0 */
+ if (adev->mes.event_log_size >= (pipe + 1) * log_size) {
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
+ lower_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
+ upper_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n",
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
+ }
+ }
+
+ data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
+ if (pipe == 0)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
+ else
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
+
+ /* unhalt MES and activate one pipe each loop */
+ data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
+ if (pipe)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
+ dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data);
+
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
+
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* unhalt MES and activate pipe0 */
- data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
if (amdgpu_emu_mode)
msleep(100);
else if (adev->enable_uni_mes)
@@ -1051,7 +1181,7 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
/* This function is for backdoor MES firmware */
static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1115,7 +1245,7 @@ static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1336,7 +1466,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v12_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1368,17 +1498,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
mes_v12_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
+ ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
return 0;
}
@@ -1436,7 +1569,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;
@@ -1479,8 +1612,9 @@ static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
-
+ adev->mes.event_log_size = adev->enable_uni_mes ?
+ (AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) :
+ (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
r = amdgpu_mes_init(adev);
if (r)
return r;
@@ -1494,12 +1628,23 @@ static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) {
r = mes_v12_0_kiq_ring_init(adev);
- else
+ } else {
r = mes_v12_0_ring_init(adev, pipe);
- if (r)
- return r;
+ if (r)
+ return r;
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mes.resource_1[pipe],
+ &adev->mes.resource_1_gpu_addr[pipe],
+ &adev->mes.resource_1_addr[pipe]);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo pipe[%d]\n", r, pipe);
+ return r;
+ }
+ }
}
return 0;
@@ -1511,6 +1656,10 @@ static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ amdgpu_bo_free_kernel(&adev->mes.resource_1[pipe],
+ &adev->mes.resource_1_gpu_addr[pipe],
+ &adev->mes.resource_1_addr[pipe]);
+
kfree(adev->mes.mqd_backup[pipe]);
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
@@ -1709,7 +1858,7 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- if (adev->enable_uni_mes)
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b)
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
@@ -1720,6 +1869,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
@@ -1743,24 +1896,12 @@ static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v12_0_hw_fini(ip_block);
}
static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v12_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v12_0_hw_init(ip_block);
}
static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
@@ -1768,6 +1909,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
@@ -1777,21 +1921,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
.name = "mes_v12_0",
.early_init = mes_v12_0_early_init,
- .late_init = mes_v12_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v12_0_sw_init,
.sw_fini = mes_v12_0_sw_fini,
.hw_init = mes_v12_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 9689e2b5d4e5..2adee2b94c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
}
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ for (i = 0; i < 5; i++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp);
+ }
+
+}
+
static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
mmhub_v1_7_init_system_aperture_regs(adev);
mmhub_v1_7_init_tlb_regs(adev);
mmhub_v1_7_init_cache_regs(adev);
+ mmhub_v1_7_init_snoop_override_regs(adev);
mmhub_v1_7_enable_system_domain(adev);
mmhub_v1_7_disable_identity_aperture(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index e646e5cef0a2..cc688ae79e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -30,6 +30,7 @@
#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -75,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
u32 inst_mask;
int i;
@@ -94,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -192,10 +195,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
uint32_t tmp, inst_mask;
int i;
- /* Setup TLB control */
- inst_mask = adev->aid_mask;
- for_each_inst(i, inst_mask) {
- tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
1);
@@ -209,7 +210,55 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp, inst_mask;
+ int i, j;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ for (j = 0; j < 5; j++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
+ }
}
}
@@ -418,6 +467,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
mmhub_v1_8_init_system_aperture_regs(adev);
mmhub_v1_8_init_tlb_regs(adev);
mmhub_v1_8_init_cache_regs(adev);
+ mmhub_v1_8_init_snoop_override_regs(adev);
mmhub_v1_8_enable_system_domain(adev);
mmhub_v1_8_disable_identity_aperture(adev);
@@ -427,6 +477,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
return 0;
}
+static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, inst_mask;
+
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
@@ -440,15 +514,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
- 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
@@ -458,6 +523,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
}
}
+
+ mmhub_v1_8_disable_l1_tlb(adev);
}
/**
@@ -719,11 +786,13 @@ static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b4ce3375d3fd..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,11 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index f2ab5001b492..951998454b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -37,39 +37,31 @@
static const char *mmhub_client_ids_v4_1_0[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
- [5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [16][0] = "LSDMA",
+ [17][0] = "JPEG",
+ [19][0] = "VCNU",
+ [22][0] = "VSCH",
+ [23][0] = "HDP",
+ [32+23][0] = "VCNRD",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
- [5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
+ [12][1] = "DBGUNBIO",
[14][1] = "XDP",
[15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [16][1] = "LSDMA",
+ [17][1] = "JPEG",
+ [18][1] = "VCNWR",
+ [19][1] = "VCNU",
+ [22][1] = "VSCH",
+ [23][1] = "HDP",
};
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index ff1b58e44689..fe0710b55c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET;
+
+ for (i = 0; i < 5 - (2 * hubid); i++) {
+ /* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance, tmp);
+ }
+
+}
+
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
uint32_t tmp;
@@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_init_cache_regs(adev, i);
+ mmhub_v9_4_init_snoop_override_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_disable_identity_aperture(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
new file mode 100644
index 000000000000..6f749814929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V5_0_H__
+#define __MMSCH_V5_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 5
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
+
+enum mmsch_v5_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v5_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v5_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v5_0_table_info vcn0;
+ struct mmsch_v5_0_table_info mjpegdec0[5];
+ struct mmsch_v5_0_table_info mjpegdec1[5];
+};
+
+struct mmsch_v5_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_direct_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v5_0_cmd_direct_read_modify_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v5_0_cmd_direct_polling {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v5_0_cmd_end {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v5_0_cmd_indirect_write {
+ struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v5_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
+
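The MMSCH_V5_0_INSERT_* macros above expect the caller to provide a set of local variables (table_loc, table_size, size, size_dw and the command structs) and append fixed-size commands, counted in dwords, to an init table that is later handed to the MMSCH firmware. The following is a minimal sketch of how an SR-IOV init path might drive them; the function name and the register offsets/values are hypothetical, only the macros and structs come from mmsch_v5_0.h.

static uint32_t example_build_mmsch_table(uint32_t *table)
{
	struct mmsch_v5_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v5_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v5_0_cmd_end end = { {0} };
	uint32_t *table_loc = table;	/* current write position, in dwords */
	uint32_t table_size = 0;	/* accumulated table size, in dwords */
	uint32_t size, size_dw;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	/* hypothetical register offsets and values, for illustration only */
	MMSCH_V5_0_INSERT_DIRECT_WT(0x1234, 0x1);
	MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(0x1238, 0xff, 0x2);
	MMSCH_V5_0_INSERT_END();

	/* the caller would normally record this in the init header */
	return table_size;
}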
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5411b798e11..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -293,6 +292,37 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -312,26 +342,47 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
- case IDH_FLR_NOTIFICATION:
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.req_bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->virt.flr_work),
- "Failed to queue work! at %s",
- __func__);
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
- case IDH_QUERY_ALIVE:
- xgpu_ai_mailbox_send_ack(adev);
- break;
- /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
- * it byfar since that polling thread will handle it,
- * other msg like flr complete is not handled here.
- */
- case IDH_CLR_MSG_BUF:
- case IDH_FLR_NOTIFICATION_CMPL:
- case IDH_READY_TO_ACCESS_GPU:
- default:
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
+ * it since that polling thread will handle it,
+ * other messages like FLR complete are not handled here.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
break;
}
@@ -387,6 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
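The three events added here carry explicit numeric values because the event codes are read straight from the mailbox receive register and therefore need to stay in sync with the numbering used on the host side. As a purely illustrative guard (not part of the patch), a compile-time check could pin the wire-protocol values:

/* hypothetical check, not present in the driver */
static_assert(IDH_RAS_BAD_PAGES_READY == 15 &&
	      IDH_RAS_BAD_PAGES_NOTIFICATION == 16 &&
	      IDH_UNRECOV_ERR_NOTIFICATION == 17,
	      "mailbox event codes must match the host protocol");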
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 4dcb72d1bdda..e7cd07383d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg == IDH_FAIL)
r = -EINVAL;
+ if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+ r = -ENODEV;
else if (reg != event)
return -ENOENT;
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r) {
- dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+ event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ } else if (r == -ENODEV) {
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+ return r;
}
msleep(10);
@@ -162,10 +173,18 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- int r, retry = 1;
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = 0, retry = 1;
enum idh_event event = -1;
+ mutex_lock(&virt->access_req_mutex);
send_request:
+
+ if (amdgpu_ras_is_rma(adev)) {
+ r = -ENODEV;
+ goto out;
+ }
+
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
@@ -184,6 +203,12 @@ send_request:
case IDH_REQ_RAS_ERROR_COUNT:
event = IDH_RAS_ERROR_COUNT_READY;
break;
+ case IDH_REQ_RAS_CPER_DUMP:
+ event = IDH_RAS_CPER_DUMP_READY;
+ break;
+ case IDH_REQ_RAS_CHK_CRITI:
+ event = IDH_REQ_RAS_CHK_CRITI_READY;
+ break;
default:
break;
}
@@ -196,17 +221,25 @@ send_request:
if (req != IDH_REQ_GPU_INIT_DATA) {
dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
- return r;
+ goto out;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
- adev->virt.req_init_data_ver =
- RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
-
- /* assume V1 in case host doesn't set version number */
- if (adev->virt.req_init_data_ver < 1)
- adev->virt.req_init_data_ver = 1;
+ switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
+ case GPU_CRIT_REGION_V2:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
+ adev->virt.init_data_header.offset =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
+ adev->virt.init_data_header.size_kb =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
+ break;
+ default:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
+ adev->virt.init_data_header.offset = -1;
+ adev->virt.init_data_header.size_kb = 0;
+ break;
+ }
}
}
@@ -217,7 +250,10 @@ send_request:
}
}
- return 0;
+out:
+ mutex_unlock(&virt->access_req_mutex);
+
+ return r;
}
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
@@ -264,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+ return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
+ 0, GPU_CRIT_REGION_V2, 0);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
@@ -317,6 +354,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -327,8 +365,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -339,6 +375,37 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -361,8 +428,32 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.req_bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
@@ -433,6 +524,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
@@ -467,6 +560,31 @@ static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}
+static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
+{
+ uint32_t vf_rptr_hi, vf_rptr_lo;
+
+ vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
+ vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
+}
+
+static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
+}
+
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+ uint32_t addr_hi, addr_lo;
+
+ addr_hi = (uint32_t)(addr >> 32);
+ addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -478,4 +596,7 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.ras_poison_handler = xgpu_nv_ras_poison_handler,
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
+ .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
+ .req_bad_pages = xgpu_nv_req_ras_bad_pages,
+ .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};
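Several of the new requests (the CPER dump read pointer, the critical-region address) pass 64-bit values through 32-bit mailbox data words, so the callers split them into high/low halves before handing them to xgpu_nv_send_access_requests_with_param(). A minimal sketch of the split and the matching reassembly on the receiving side; the helper names are hypothetical:

static inline void example_split_u64(u64 value, u32 *hi, u32 *lo)
{
	*hi = (u32)(value >> 32);
	*lo = (u32)(value & 0xFFFFFFFF);
}

static inline u64 example_join_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}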
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 9d61d76e1bf9..c1083e5e41e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -41,6 +41,9 @@ enum idh_request {
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
+ IDH_REQ_RAS_CPER_DUMP = 204,
+ IDH_REQ_RAS_BAD_PAGES = 205,
+ IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@@ -56,6 +59,11 @@ enum idh_event {
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
+ IDH_RAS_CPER_DUMP_READY = 14,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
+ IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 62cdfe10e6f4..4cd325149b63 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -625,7 +625,7 @@ static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
return navi10_ih_hw_init(ip_block);
}
-static bool navi10_ih_is_idle(void *handle)
+static bool navi10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -682,9 +682,9 @@ static int navi10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
+static void navi10_ih_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index c92875ceb31f..9b4025c39e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbif_v6_3_1.h"
#include "nbif/nbif_6_3_1_offset.h"
@@ -474,52 +473,6 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
};
-static void nbif_v6_3_1_sriov_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
-{
-}
-
-static void nbif_v6_3_1_sriov_sdma_doorbell_range(struct amdgpu_device *adev,
- int instance, bool use_doorbell,
- int doorbell_index,
- int doorbell_size)
-{
-}
-
-static void nbif_v6_3_1_sriov_vcn_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell,
- int doorbell_index, int instance)
-{
-}
-
-static void nbif_v6_3_1_sriov_gc_doorbell_init(struct amdgpu_device *adev)
-{
-}
-
-const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
- .get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
- .get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
- .get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
- .get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
- .get_rev_id = nbif_v6_3_1_get_rev_id,
- .mc_access_enable = nbif_v6_3_1_mc_access_enable,
- .get_memsize = nbif_v6_3_1_get_memsize,
- .sdma_doorbell_range = nbif_v6_3_1_sriov_sdma_doorbell_range,
- .vcn_doorbell_range = nbif_v6_3_1_sriov_vcn_doorbell_range,
- .gc_doorbell_init = nbif_v6_3_1_sriov_gc_doorbell_init,
- .enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
- .enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
- .ih_doorbell_range = nbif_v6_3_1_sriov_ih_doorbell_range,
- .update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
- .update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
- .get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
- .ih_control = nbif_v6_3_1_ih_control,
- .init_registers = nbif_v6_3_1_init_registers,
- .remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
- .get_rom_offset = nbif_v6_3_1_get_rom_offset,
- .set_reg_remap = nbif_v6_3_1_set_reg_remap,
-};
-
static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index 9ac4831d39e1..3afec715a9fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -28,7 +28,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
-extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 739fce4fa8fd..04041b398781 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"
#include "nbio/nbio_2_3_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index a54052dea8bf..f89e5f40e1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v4_3.h"
#include "nbio/nbio_4_3_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 34180c6070dd..e911368c1aeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
#include "nbio/nbio_6_1_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index d1032e9992b4..1569a1e934ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"
#include "nbio/nbio_7_0_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 41421da63a08..bed5ef4d8788 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_11.h"
#include "nbio/nbio_7_11_0_offset.h"
@@ -361,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+#define MMIO_REG_HOLE_OFFSET 0x44000
static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index a766e2d90cd0..acc5f363684a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_2.h"
#include "nbio/nbio_7_2_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index a26a9be58eac..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"
@@ -152,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 3fb6d2aa7e3b..2ee60b8746a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_7.h"
#include "nbio/nbio_7_7_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 8a0a63ac88d2..bdfd2917e3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_9.h"
#include "amdgpu_ras.h"
@@ -32,9 +31,6 @@
#define NPS_MODE_MASK 0x000000FFL
-/* Core 0 Port 0 counter */
-#define smnPCIEP_NAK_COUNTER 0x1A340218
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -45,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp;
-
- tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
- /* If it is VF or subrevision holds a non-zero value, that should be used */
- if (tmp || amdgpu_sriov_vf(adev))
- return tmp;
+ u32 rev_id;
- /* If discovery subrev is not updated, use register version */
- tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
- tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
- STRAP_ATI_REV_ID_DEV0_F0);
+ /*
+ * fetch the sub-revision field from the IP-discovery table
+ * (returns zero if the table entry is not populated).
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+ } else {
+ rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+ STRAP_ATI_REV_ID_DEV0_F0);
+ }
- return tmp;
+ return rev_id;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
@@ -178,8 +176,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
+ u32 range_size;
if (use_doorbell) {
+ range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0)) ?
+ 0xb : 0x9;
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
@@ -187,7 +189,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x9);
+ range_size);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -205,7 +207,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, range_size);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
@@ -464,22 +466,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
}
}
-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
-{
- u32 val, nak_r, nak_g;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- /* Get the number of NAKs received and generated */
- val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
- nak_r = val & 0xFFFF;
- nak_g = val >> 16;
-
- /* Add the total number of NAKs, i.e the number of replays */
- return (nak_r + nak_g);
-}
-
#define MMIO_REG_HOLE_OFFSET 0x1A000
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
@@ -521,7 +507,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
- .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c3516..50e77d9b30af 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -141,23 +141,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -454,6 +454,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
@@ -1034,7 +1035,7 @@ static int nv_common_resume(struct amdgpu_ip_block *ip_block)
return nv_common_hw_init(ip_block);
}
-static bool nv_common_is_idle(void *handle)
+static bool nv_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1077,9 +1078,9 @@ static int nv_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void nv_common_get_clockgating_state(void *handle, u64 *flags)
+static void nv_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
*flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index 631dafb92299..56f1bfac0b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -64,6 +64,24 @@
#define PACKET3_INDIRECT_BUFFER_CNST_END 0x19
#define PACKET3_ATOMIC_GDS 0x1D
#define PACKET3_ATOMIC_MEM 0x1E
+#define PACKET3_ATOMIC_MEM__ATOMIC(x) ((((unsigned)(x)) & 0x7F) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_ATOMIC_MEM__ADDR_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__ADDR_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__SRC_DATA_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__SRC_DATA_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__CMP_DATA_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__CMP_DATA_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__LOOP_INTERVAL(x) ((((unsigned)(x)) & 0x1FFF) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND__SINGLE_PASS_ATOMIC 0
+#define PACKET3_ATOMIC_MEM__COMMAND__LOOP_UNTIL_COMPARE_SATISFIED 1
+#define PACKET3_ATOMIC_MEM__COMMAND__WAIT_FOR_WRITE_CONFIRMATION 2
+#define PACKET3_ATOMIC_MEM__COMMAND__SEND_AND_CONTINUE 3
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__LRU 0
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__STREAM 1
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__NOA 2
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__BYPASS 3
#define PACKET3_OCCLUSION_QUERY 0x1F
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
@@ -105,6 +123,38 @@
* 1 - pfp
* 2 - ce
*/
+#define PACKET3_WRITE_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_WRITE_DATA__ADDR_INCR(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_WRITE_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_WRITE_DATA__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_GDS_ADDR(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__MODE(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_WRITE_DATA__AID_ID(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WRITE_DATA__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 24)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR_LO(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_WRITE_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_WRITE_DATA__DST_SEL__TC_L2 2
+#define PACKET3_WRITE_DATA__DST_SEL__GDS 3
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY 5
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY_MAPPED_ADC_PERSISTENT_STATE 6
+#define PACKET3_WRITE_DATA__ADDR_INCR__INCREMENT_ADDRESS 0
+#define PACKET3_WRITE_DATA__ADDR_INCR__DO_NOT_INCREMENT_ADDRESS 1
+#define PACKET3_WRITE_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_WRITE_CONFIRMATION 0
+#define PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION 1
+#define PACKET3_WRITE_DATA__MODE__PF_VF_DISABLED 0
+#define PACKET3_WRITE_DATA__MODE__PF_VF_ENABLED 1
+#define PACKET3_WRITE_DATA__TEMPORAL__RT 0
+#define PACKET3_WRITE_DATA__TEMPORAL__NT 1
+#define PACKET3_WRITE_DATA__TEMPORAL__HT 2
+#define PACKET3_WRITE_DATA__TEMPORAL__LU 3
+#define PACKET3_WRITE_DATA__CACHE_POLICY__LRU 0
+#define PACKET3_WRITE_DATA__CACHE_POLICY__STREAM 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__NOA 2
+#define PACKET3_WRITE_DATA__CACHE_POLICY__BYPASS 3
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
@@ -135,6 +185,42 @@
/* 0 - me
* 1 - pfp
*/
+#define PACKET3_WAIT_REG_MEM__FUNCTION(x) ((((unsigned)(x)) & 0x7) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE(x) ((((unsigned)(x)) & 0x3) << 4)
+#define PACKET3_WAIT_REG_MEM__OPERATION(x) ((((unsigned)(x)) & 0x3) << 6)
+#define PACKET3_WAIT_REG_MEM__MES_INTR_PIPE(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WAIT_REG_MEM__MES_ACTION(x) ((((unsigned)(x)) & 0x1) << 24)
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WAIT_REG_MEM__REG_POLL_ADDR(x) ((((unsigned)(x)) & 0X3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR1(x) ((((unsigned)(x)) & 0X3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR2(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REFERENCE(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__MASK(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__OPTIMIZE_ACE_OFFLOAD_MODE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_WAIT_REG_MEM__FUNCTION__ALWAYS_PASS 0
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_REF_VALUE 1
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_EQUAL_TO_THE_REF_VALUE 2
+#define PACKET3_WAIT_REG_MEM__FUNCTION__EQUAL_TO_THE_REFERENCE_VALUE 3
+#define PACKET3_WAIT_REG_MEM__FUNCTION__NOT_EQUAL_REFERENCE_VALUE 4
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_OR_EQUAL_REFERENCE_VALUE 5
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_REFERENCE_VALUE 6
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__REGISTER_SPACE 0
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__MEMORY_SPACE 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_REG_MEM 0
+#define PACKET3_WAIT_REG_MEM__OPERATION__WR_WAIT_WR_REG 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_MEM_PREEMPTABLE 3
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__LRU 0
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__STREAM 1
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__NOA 2
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__BYPASS 3
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__RT 0
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__NT 1
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__HT 2
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__LU 3
#define PACKET3_INDIRECT_BUFFER 0x3F
#define INDIRECT_BUFFER_VALID (1 << 23)
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
@@ -144,8 +230,94 @@
*/
#define INDIRECT_BUFFER_PRE_ENB(x) ((x) << 21)
#define INDIRECT_BUFFER_PRE_RESUME(x) ((x) << 30)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_HI(x) ((unsigned)(x))
+#define PACKET3_INDIRECT_BUFFER__IB_SIZE(x) ((((unsigned)(x)) & 0xFFFFF) << 0)
+#define PACKET3_INDIRECT_BUFFER__CHAIN(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_INDIRECT_BUFFER__OFFLOAD_POLLING(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_INDIRECT_BUFFER__VALID(x) ((((unsigned)(x)) & 0x1) << 23)
+#define PACKET3_INDIRECT_BUFFER__VMID(x) ((((unsigned)(x)) & 0xF) << 24)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__PRIV(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__RT 0
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__NT 1
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__HT 2
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__LU 3
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__LRU 0
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__STREAM 1
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__NOA 2
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__BYPASS 3
#define PACKET3_COND_INDIRECT_BUFFER 0x3F
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_COPY_DATA__SRC_SEL(x) ((((unsigned)(x)) & 0xF) << 0)
+#define PACKET3_COPY_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__SRC_TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__COUNT_SEL(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_COPY_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS(x) ((((unsigned)(x)) & 0x1) << 29)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__SRC_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__SRC_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__SRC_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_MEMTC_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__DST_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__DST_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__DST_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__DST_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__MODE(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_COPY_DATA__AID_ID(x) ((((unsigned)(x)) & 0x3) << 23)
+#define PACKET3_COPY_DATA__DST_TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET_LO(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_COPY_DATA__DST_REG_OFFSET_LO(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_COPY_DATA__SRC_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2_OBSOLETE 1
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__SRC_SEL__GDS 3
+#define PACKET3_COPY_DATA__SRC_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__SRC_SEL__IMMEDIATE_DATA 5
+#define PACKET3_COPY_DATA__SRC_SEL__ATOMIC_RETURN_DATA 6
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA0 7
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA1 8
+#define PACKET3_COPY_DATA__SRC_SEL__GPU_CLOCK_COUNT 9
+#define PACKET3_COPY_DATA__SRC_SEL__SYSTEM_CLOCK_COUNT 10
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__DST_SEL__GDS 3
+#define PACKET3_COPY_DATA__DST_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2_OBSOLETE 5
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REG_DC 6
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__RT 0
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__NT 1
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__HT 2
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__LU 3
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__NOA 2
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__BYPASS 3
+#define PACKET3_COPY_DATA__COUNT_SEL__32_BITS_OF_DATA 0
+#define PACKET3_COPY_DATA__COUNT_SEL__64_BITS_OF_DATA 1
+#define PACKET3_COPY_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_CONFIRMATION 0
+#define PACKET3_COPY_DATA__WR_CONFIRM__WAIT_FOR_CONFIRMATION 1
+#define PACKET3_COPY_DATA__MODE__PF_VF_DISABLED 0
+#define PACKET3_COPY_DATA__MODE__PF_VF_ENABLED 1
+#define PACKET3_COPY_DATA__DST_TEMPORAL__RT 0
+#define PACKET3_COPY_DATA__DST_TEMPORAL__NT 1
+#define PACKET3_COPY_DATA__DST_TEMPORAL__HT 2
+#define PACKET3_COPY_DATA__DST_TEMPORAL__LU 3
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__NOA 2
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__BYPASS 3
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__DEFAULT 0
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__PHASE_UPDATE 1
#define PACKET3_CP_DMA 0x41
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -160,6 +332,23 @@
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
*/
+#define PACKET3_EVENT_WRITE__EVENT_TYPE(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE(x) ((((unsigned)(x)) & 0x3) << 29)
+#define PACKET3_EVENT_WRITE__OFFLOAD_ENABLE(x) ((((unsigned)(x)) & 0x1) << 0)
+#define PACKET3_EVENT_WRITE__ADDRESS_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_EVENT_WRITE__ADDRESS_HI(x) ((unsigned)(x))
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__OTHER 0
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_PIPELINESTAT 2
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__CS_PARTIAL_FLUSH 4
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS 8
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS1 9
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS2 10
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS3 11
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__LEGACY_MODE 0
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__MIXED_MODE1 1
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__NEW_MODE 2
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__MIXED_MODE3 3
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_RELEASE_MEM 0x49
@@ -304,6 +493,12 @@
* 2: REVERSE
*/
#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18)
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_LO(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_HI(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__GCR_CNTL(x) ((((unsigned)(x)) & 0x7FFFF) << 0)
#define PACKET3_REWIND 0x59
#define PACKET3_INTERRUPT 0x5A
#define PACKET3_GEN_PDEPTE 0x5B
@@ -330,11 +525,17 @@
#define PACKET3_SET_SH_REG 0x76
#define PACKET3_SET_SH_REG_START 0x00002c00
#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_SET_SH_REG__VMID_SHIFT(x) ((((unsigned)(x)) & 0x1F) << 23)
+#define PACKET3_SET_SH_REG__INDEX(x) ((((unsigned)(x)) & 0xF) << 28)
+#define PACKET3_SET_SH_REG__INDEX__DEFAULT 0
+#define PACKET3_SET_SH_REG__INDEX__INSERT_VMID 1
#define PACKET3_SET_SH_REG_OFFSET 0x77
#define PACKET3_SET_QUEUE_REG 0x78
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
#define PACKET3_SET_UCONFIG_REG_INDEX 0x7A
#define PACKET3_FORWARD_HEADER 0x7C
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
@@ -369,6 +570,7 @@
# define PACKET3_INVALIDATE_TLBS_DST_SEL(x) ((x) << 0)
# define PACKET3_INVALIDATE_TLBS_ALL_HUB(x) ((x) << 4)
# define PACKET3_INVALIDATE_TLBS_PASID(x) ((x) << 5)
+# define PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(x) ((x) << 29)
#define PACKET3_AQL_PACKET 0x99
#define PACKET3_DMA_DATA_FILL_MULTI 0x9A
#define PACKET3_SET_SH_REG_INDEX 0x9B
@@ -462,6 +664,12 @@
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
#define PACKET3_RUN_LIST 0xA5
#define PACKET3_MAP_PROCESS_VM 0xA6
+
+#define PACKET3_RUN_CLEANER_SHADER 0xD2
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
/* GFX11 */
#define PACKET3_SET_Q_PREEMPTION_MODE 0xF0
# define PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(x) ((x) << 0)
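The per-field encoders added to nvd.h above are intended to replace open-coded shifts when assembling PM4 packets. As a rough illustration only (not taken from this patch), a confirmed single-dword WRITE_DATA to memory could be built from the generic PACKET3() header macro together with the new encoders; 'ring', 'gpu_addr' and 'val' are placeholders supplied by the caller:

/* Illustrative sketch, not from the patch: emit a WRITE_DATA to memory
 * with write confirmation, using the new nvd.h field encoders.
 */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring,
	PACKET3_WRITE_DATA__DST_SEL(PACKET3_WRITE_DATA__DST_SEL__MEMORY) |
	PACKET3_WRITE_DATA__WR_CONFIRM(PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, val);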
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f4a91b126c73..73f87131a7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -106,7 +106,9 @@ enum psp_gfx_cmd_id
/*IDs of performance monitoring/profiling*/
GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
/* Dynamic memory partitioninig (NPS mode change)*/
- GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */
+ GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */
};
/* PSP boot config sub-commands */
@@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg {
uint32_t boot_cfg; /* boot config data */
};
+/* Command-specific response for fw reserve info */
+struct psp_gfx_uresp_fw_reserve_info {
+ uint32_t reserve_base_address_hi;
+ uint32_t reserve_base_address_lo;
+ uint32_t reserve_size;
+};
+
/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp {
struct psp_gfx_uresp_reserved reserved;
struct psp_gfx_uresp_bootcfg boot_cfg;
struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+ struct psp_gfx_uresp_fw_reserve_info fw_reserve_info;
};
/* Structure of GFX Response buffer.
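For the two new FW-reservation queries added above, the PSP reports the reserved region through the new psp_gfx_uresp_fw_reserve_info member of the response union. A minimal sketch of how a caller might reassemble the 64-bit base address from such a response follows; the 'resp' pointer and the surrounding GPCOM plumbing are assumed, not shown by this hunk:

/* Illustrative only: combine the hi/lo halves of the reported FW
 * reservation into a 64-bit address. 'resp' is assumed to point at the
 * struct psp_gfx_resp returned for the GPCOM command.
 */
struct psp_gfx_uresp_fw_reserve_info *info = &resp->uresp.fw_reserve_info;
uint64_t reserve_base = ((uint64_t)info->reserve_base_address_hi << 32) |
			info->reserve_base_address_lo;
uint32_t reserve_size = info->reserve_size;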
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 145186a1e48f..3584b8c18fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2395f1856962..a9be7a505026 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -129,6 +129,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
err = psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
@@ -141,21 +142,43 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
return err;
}
-static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg1, sol_reg2;
+ int retry_loop;
+
+ /* Wait for the TOS to be unloaded */
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ usleep_range(1000, 2000);
+ sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg1 == sol_reg2)
+ return 0;
+ }
+ dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x",
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+ return -ETIME;
+}
+
+static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
- for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* For a reset done at the end of S3, only wait for TOS to be unloaded */
+ if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ return psp_v11_wait_for_tos_unload(psp);
+
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x8000FFFF, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -251,8 +274,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -276,11 +299,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -316,13 +341,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
return ret;
@@ -346,8 +373,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -380,7 +408,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -392,17 +421,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
msleep(500);
- offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
-
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
-
- if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
- return -EINVAL;
- }
-
- DRM_INFO("psp mode1 reset succeed \n");
-
return 0;
}
@@ -420,8 +438,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -532,7 +551,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
@@ -600,7 +619,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -637,7 +656,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
@@ -658,7 +677,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
.load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
- .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw,
+ .wait_for_bootloader = psp_v11_0_wait_for_bootloader
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
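Throughout the PSP changes in this series the final boolean argument of psp_wait_for() is replaced by a flags word: 0 appears to request the default wait-for-match behaviour, PSP_WAITREG_NOVERBOSE seems to suppress per-iteration timeout logging inside retry loops, and PSP_WAITREG_CHANGED replaces the old check_changed=true path that waits for the register to move away from its initial value. A sketch of the two new call patterns, with the flag semantics inferred from this series rather than quoted from the psp header:

/* Poll quietly inside a retry loop (no timeout message per iteration): */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
		   0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);

/* Wait for the register to change from its current value: */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
		   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
		   PSP_WAITREG_CHANGED);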
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index 5697760a819b..93787a90d598 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
@@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index fcd708eae75c..4c6450d62299 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -34,9 +34,6 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
-#include "oss/osssys_4_0_offset.h"
-#include "oss/osssys_4_0_sh_mask.h"
-
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
@@ -85,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -99,11 +96,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -124,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -138,46 +132,13 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
-static void psp_v12_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- psp_v12_0_reroute_ih(psp);
-
- if (amdgpu_sriov_vf(psp->adev)) {
- /* Write low address of the ring to C2PMSG_102 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_103 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
-
- /* Write the ring initialization command to C2PMSG_101 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
-
- } else {
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
- }
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -247,16 +182,15 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -287,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -301,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index cc621064610f..af4a7d7c4abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
@@ -71,20 +73,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
-/* VBIOS gfl defines */
-#define MBOX_READY_MASK 0x80000000
-#define MBOX_STATUS_MASK 0x0000FFFF
-#define MBOX_COMMAND_MASK 0x00FF0000
-#define MBOX_READY_FLAG 0x80000000
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
-#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
-
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
#define regMP1_PUB_SCRATCH0 0x3b10090
+#define PSP13_BL_STATUS_SIZE 100
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -151,6 +146,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
+static void psp_v13_0_bootloader_print_status(struct psp_context *psp,
+ const char *msg)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 bl_status_reg;
+ char bl_status_msg[PSP13_BL_STATUS_SIZE];
+ int i, at;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ at = 0;
+ for_each_inst(i, adev->aid_mask) {
+ bl_status_reg =
+ (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
+ << 2) +
+ adev->asic_funcs->encode_ext_smn_addressing(i);
+ at += snprintf(bl_status_msg + at,
+ PSP13_BL_STATUS_SIZE - at,
+ " status(%02i): 0x%08x", i,
+ RREG32_PCIE_EXT(bl_status_reg));
+ }
+ dev_info(adev->dev, "%s - %s", msg, bl_status_msg);
+ }
+}
+
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -161,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
ready having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
@@ -192,10 +213,13 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
+ if (retry_loop && !(retry_loop % 10))
+ psp_v13_0_bootloader_print_status(
+ psp, "Waiting for bootloader completion");
}
return ret;
@@ -338,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
if (!ret)
psp_v13_0_init_sos_version(psp);
@@ -360,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -369,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -406,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -436,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -500,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -610,7 +640,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
@@ -653,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -690,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);
@@ -710,12 +740,14 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
- if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
+ cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -739,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -766,6 +798,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
+static int psp_v13_0_dump_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
+
+ return ret;
+}
+
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -858,6 +921,26 @@ static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
return false;
}
+static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = -EOPNOTSUPP;
+
+ /* PSP will broadcast the value to all instances */
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
+
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
+ }
+
+ return ret;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -879,11 +962,13 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
+ .dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
.is_reload_needed = psp_v13_0_is_reload_needed,
+ .reg_program_no_ring = psp_v13_0_reg_program_no_ring,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
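The new psp_v13_0_dump_spirom() above mirrors the update path but issues the GET variants of the SPI commands, so the flash image is written into the buffer at fw_pri_mc_addr instead of being read from it. A hedged sketch of how the generic PSP layer might reach it through the new .dump_spirom hook; psp_dump_spirom() here is a hypothetical wrapper for illustration, not part of this patch:

/* Hypothetical wrapper; the real caller in amdgpu_psp.c is not shown
 * in this hunk.
 */
static int psp_dump_spirom(struct psp_context *psp, uint64_t fw_pri_mc_addr)
{
	if (!psp->funcs || !psp->funcs->dump_spirom)
		return -EOPNOTSUPP;

	return psp->funcs->dump_spirom(psp, fw_pri_mc_addr);
}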
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index eaa5512a21da..5f39a2edcc95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 4d33c95a5116..38dfc5c19f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -34,7 +34,11 @@
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -72,6 +76,14 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
+ case IP_VERSION(14, 0, 5):
+ err = psp_init_toc_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ break;
default:
BUG();
}
@@ -99,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -218,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
- 0, true);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -238,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
@@ -247,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -284,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -314,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -378,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -488,7 +505,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
@@ -530,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -567,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
@@ -592,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -619,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp,
int ret;
/* Confirm PSP is ready to start */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f6b75e3e47ff..833830bc3e2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
/* Change IH ring for UMC */
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
@@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
}
static int psp_v3_1_ring_create(struct psp_context *psp,
@@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_101), 0x80000000,
- 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, 0);
} else {
/* Write low address of the ring to C2PMSG_69 */
@@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_64), 0x80000000,
- 0x8000FFFF, false);
-
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, 0);
}
return ret;
}
@@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 135c5099bfb8..92ce580647cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -911,9 +911,9 @@ static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
return sdma_v2_4_hw_init(ip_block);
}
-static bool sdma_v2_4_is_idle(void *handle)
+static bool sdma_v2_4_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index c611328671ed..1c076bd1cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1200,9 +1200,9 @@ static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v3_0_hw_init(ip_block);
}
-static bool sdma_v3_0_is_idle(void *handle)
+static bool sdma_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1514,9 +1514,9 @@ static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v3_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index b48d9c0b2e1c..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2015,9 +2015,9 @@ static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_0_hw_init(ip_block);
}
-static bool sdma_v4_0_is_idle(void *handle)
+static bool sdma_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -2331,9 +2331,9 @@ static int sdma_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v4_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -2381,7 +2381,6 @@ static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
if (!adev->sdma.ip_dump)
return;
- amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
instance_offset = i * reg_count;
for (j = 0; j < reg_count; j++)
@@ -2389,7 +2388,6 @@ static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
RREG32(sdma_v4_0_get_reg_offset(adev, i,
sdma_reg_list_4_0[j].reg_offset));
}
- amdgpu_gfx_off_ctrl(adev, true);
}
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 56507ae919b0..a1443990d5c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -30,6 +30,7 @@
#include "amdgpu_xcp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"
@@ -44,6 +45,7 @@
#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
@@ -105,6 +107,11 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -486,7 +493,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@@ -498,6 +505,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -681,6 +691,7 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -706,12 +717,20 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+	/* For the guilty queue, set RPTR to the current wptr to skip the bad commands;
+	 * if it is not a guilty queue, restore the cached_rptr and continue execution.
+	 */
+ if (adev->sdma.instance[i].gfx_guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
@@ -778,6 +797,7 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -785,12 +805,20 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+	/* For the guilty queue, set RPTR to the current wptr to skip the bad commands;
+	 * if it is not a guilty queue, restore the cached_rptr and continue execution.
+	 */
+ if (adev->sdma.instance[i].page_guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
@@ -973,6 +1001,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
@@ -1312,6 +1341,12 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
+static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
+ .start_kernel_queue = &sdma_v4_4_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
+};
+
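The amdgpu_sdma_funcs table registered above splits per-queue recovery into stop, soft-reset and restore callbacks that the common amdgpu_sdma_reset_engine() helper can sequence for each SDMA generation touched by this series. A minimal sketch of that presumed sequencing follows, under the assumption that the helper simply drives the three callbacks under the new engine_reset_mutex; the helper's real body is not part of this diff.
/* Hedged sketch: only the callback names, the mutex and the instance layout
 * come from this diff; the ordering shown here is an assumption. */
static int sdma_reset_engine_sketch(struct amdgpu_device *adev, u32 instance_id)
{
	struct amdgpu_sdma_instance *sdma = &adev->sdma.instance[instance_id];
	int r;

	mutex_lock(&sdma->engine_reset_mutex);

	/* halt the queue, mark guilty flags and cache the rptr */
	r = sdma->funcs->stop_kernel_queue(&sdma->ring);
	if (r)
		goto out;

	/* reset the engine (DPM request or GRBM soft reset, per generation) */
	r = sdma->funcs->soft_reset_kernel_queue(adev, instance_id);
	if (r)
		goto out;

	/* unfreeze and reprogram the ring from the cached pointers */
	r = sdma->funcs->start_kernel_queue(&sdma->ring);
out:
	mutex_unlock(&sdma->engine_reset_mutex);
	return r;
}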
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1330,7 +1365,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
-
return 0;
}
@@ -1351,6 +1385,12 @@ static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
+ /* The initialization is done in the late_init stage to ensure that the SMU
+ * initialization and capability setup are completed before we check the SDMA
+ * reset capability
+ */
+ sdma_v4_4_2_update_reset_mask(adev);
+
return 0;
}
@@ -1406,9 +1446,21 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
&adev->sdma.srbm_write_irq);
if (r)
return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
+ &adev->sdma.ctxt_empty_irq);
+ if (r)
+ return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ /* Initialize guilty flags for GFX and PAGE queues */
+ adev->sdma.instance[i].gfx_guilty = false;
+ adev->sdma.instance[i].page_guilty = false;
+ adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
+
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1452,7 +1504,6 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
@@ -1555,9 +1606,9 @@ static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_4_2_hw_init(ip_block);
}
-static bool sdma_v4_4_2_is_idle(void *handle)
+static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1596,25 +1647,74 @@ static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
+{
+ uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
+ uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
+
+ /* Check if the SELECTED bit is set */
+ return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
+}
+
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ u32 id = ring->me;
+ int r;
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, id, false);
+ amdgpu_amdkfd_resume(adev, true);
+ return r;
+}
+
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 instance_id = ring->me;
u32 inst_mask;
+ uint64_t rptr;
- if ((adev->flags & AMD_IS_APU) || amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ /* Check if this queue is the guilty one */
+ adev->sdma.instance[instance_id].gfx_guilty =
+ sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+ if (adev->sdma.has_page_queue)
+ adev->sdma.instance[instance_id].page_guilty =
+ sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+
+	/* Cache the rptr before the reset; after the reset,
+	 * all of the registers will be reset to 0.
+	 */
+ rptr = amdgpu_ring_get_rptr(ring);
+ ring->cached_rptr = rptr;
+ /* Cache the rptr for the page queue if it exists */
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page_ring = &adev->sdma.instance[instance_id].page;
+ rptr = amdgpu_ring_get_rptr(page_ring);
+ page_ring->cached_rptr = rptr;
+ }
+
/* stop queue */
inst_mask = 1 << ring->me;
sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
if (adev->sdma.has_page_queue)
sdma_v4_4_2_inst_page_stop(adev, inst_mask);
- r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
- if (r)
- return r;
+ return 0;
+}
+
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_mask;
+ int i, r;
+ inst_mask = 1 << ring->me;
udelay(50);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1629,7 +1729,18 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return -ETIMEDOUT;
}
- return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
+
+ return r;
+}
+
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id)
+{
+	/* For SDMA 4.x, use the existing DPM interface for backward compatibility;
+	 * we need to convert the logical instance ID to the physical instance ID before the reset.
+	 */
+ return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
}
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1677,6 +1788,9 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
case 0:
amdgpu_fence_process(&adev->sdma.instance[i].ring);
break;
+ case 1:
+ amdgpu_fence_process(&adev->sdma.instance[i].page);
+ break;
default:
break;
}
@@ -1768,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -1814,6 +1928,16 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* There is nothing useful to be done here; this handler is only kept for debugging */
+ dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{
@@ -1904,9 +2028,9 @@ static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v4_4_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -2044,6 +2168,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -2096,6 +2221,10 @@ static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
.process = sdma_v4_4_2_process_srbm_write_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
+ .process = sdma_v4_4_2_process_ctxt_empty_irq,
+};
+
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
@@ -2104,6 +2233,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
@@ -2112,6 +2242,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+ adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
}
/**
@@ -2209,6 +2340,44 @@ static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
+/**
+ * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
+ * @adev: Pointer to the AMDGPU device structure
+ *
+ * This function updates the reset mask for SDMA and sets the supported
+ * reset types based on the IP version and firmware versions.
+ *
+ */
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
+{
+ /* per queue reset not supported for SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /*
+	 * The user queue relies on the MEC FW and the PMFW when the SDMA queue does a reset.
+	 * Both of them need to be checked here to skip old MEC and PMFW versions.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ if ((adev->gfx.mec_fw_version >= 0xb0) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if ((adev->gfx.mec_fw_version >= 0xf) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+}
+
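With sdma_v4_4_2_update_reset_mask() run from late_init, consumers only need to test the advertised capability instead of re-checking firmware versions. A small hedged illustration follows; the helper name is hypothetical, while AMDGPU_RESET_TYPE_PER_QUEUE and the supported_reset field come from this diff.
/* Hedged sketch: gate per-queue recovery on the mask populated above. */
static bool sdma_can_reset_queue_sketch(struct amdgpu_device *adev)
{
	return !!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE);
}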
const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
@@ -2370,11 +2539,13 @@ static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_ban
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b764550834a0..8ddc4df06a1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index,
- ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers (NAVI10).
*/
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_0_gfx_stop(adev);
+		sdma_v5_0_gfx_stop(adev, inst_mask);
sdma_v5_0_rlc_stop(adev);
}
@@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256,
- AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1197,8 +1136,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+ u32 grbm_soft_reset;
+ u32 tmp;
+
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_0_stop_queue,
+ .start_kernel_queue = &sdma_v5_0_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1431,6 +1399,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1458,7 +1428,9 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
- if (adev->sdma.instance[0].fw_version >= 35)
+ if ((adev->sdma.instance[0].fw_version >= 35) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1530,9 +1502,9 @@ static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_0_hw_init(ip_block);
}
-static bool sdma_v5_0_is_idle(void *handle)
+static bool sdma_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1569,35 +1541,43 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_0_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1628,31 +1608,27 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze*/
- freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
-
- r = sdma_v5_0_gfx_resume_instance(adev, i, true);
+ WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
-err0:
+ r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
@@ -1883,9 +1859,9 @@ static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v5_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index b1818e87889a..51101b0aa2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@@ -340,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me > 1) {
- amdgpu_asic_flush_hdp(adev, ring);
+ amdgpu_hdp_flush(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
@@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if ((flags & AMDGPU_FENCE_FLAG_INT)) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_gfx_stop(adev, inst_mask);
sdma_v5_2_rlc_stop(adev);
}
@@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
- struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
- int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- grbm_soft_reset = REG_SET_FIELD(0,
- GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
- 1);
- grbm_soft_reset <<= i;
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma_v5_2_soft_reset_engine(adev, i);
udelay(50);
}
return 0;
}
+static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_2_stop_queue,
+ .start_kernel_queue = &sdma_v5_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine,
+};
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1053,8 +1035,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1337,6 +1318,8 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1364,11 +1347,15 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
- if (adev->sdma.instance[0].fw_version >= 76)
+ if ((adev->sdma.instance[0].fw_version >= 76) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
- if (adev->sdma.instance[0].fw_version >= 34)
+ if ((adev->sdma.instance[0].fw_version >= 34) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1435,9 +1422,9 @@ static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_2_hw_init(ip_block);
}
-static bool sdma_v5_2_is_idle(void *handle)
+static bool sdma_v5_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1469,35 +1456,43 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_2_gfx_stop(adev, 1 << i);
/*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1530,32 +1525,28 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze and unhalt */
- freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+ r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
-err0:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
@@ -1847,9 +1838,9 @@ static int sdma_v5_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v5_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 1a023b45f0be..217040044987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -43,6 +43,8 @@
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -51,6 +53,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_1_3.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -375,11 +378,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -890,6 +891,12 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -916,33 +923,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -955,10 +951,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -970,8 +963,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -994,37 +986,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1052,10 +1030,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1066,8 +1041,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1299,6 +1273,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r)
return r;
@@ -1328,11 +1319,19 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_11_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1356,7 +1355,9 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
- if (adev->sdma.instance[0].fw_version >= 21)
+ if ((adev->sdma.instance[0].fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1375,6 +1376,43 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 2):
+ if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 3):
+ if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 0):
+ if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 1):
+ if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 2):
+ if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 3):
+ if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
+
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -1398,11 +1436,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v6_0_start(adev);
+ r = sdma_v6_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1414,6 +1480,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
+ sdma_v6_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1428,9 +1495,9 @@ static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v6_0_hw_init(ip_block);
}
-static bool sdma_v6_0_is_idle(void *handle)
+static bool sdma_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1505,29 +1572,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v6_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1554,25 +1621,9 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1594,6 +1645,29 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1613,7 +1687,7 @@ static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
}
@@ -1730,6 +1804,10 @@ static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
.process = sdma_v6_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
+ .process = sdma_v6_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
.process = sdma_v6_0_process_illegal_inst_irq,
};
@@ -1739,6 +1817,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}
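
The new sdma_v6_0_process_fence_irq() above resolves a user-queue fence interrupt by taking the doorbell offset from the IV entry, shifting out the OFFSET field and using the result as the key into the device's userq_xa xarray. Below is an illustrative, self-contained sketch of that lookup pattern only, not part of the patch; everything except the xarray API itself (the struct, helper names and the process callback) is hypothetical.

#include <linux/xarray.h>
#include <linux/gfp.h>

struct demo_fence_drv {
	/* stand-in for amdgpu_userq_fence_driver_process() */
	void (*process)(struct demo_fence_drv *drv);
};

/* IRQ-safe xarray keyed by doorbell offset, like adev->userq_xa. */
static DEFINE_XARRAY_FLAGS(demo_userq_xa, XA_FLAGS_LOCK_IRQ);

/* Queue creation path: remember the fence driver under its doorbell offset. */
static int demo_register_queue(unsigned long doorbell_offset,
			       struct demo_fence_drv *drv)
{
	return xa_err(xa_store_irq(&demo_userq_xa, doorbell_offset,
				   drv, GFP_KERNEL));
}

/* Interrupt path: map the doorbell offset back to its fence driver. */
static void demo_handle_fence_irq(unsigned long doorbell_offset)
{
	struct demo_fence_drv *drv;
	unsigned long flags;

	xa_lock_irqsave(&demo_userq_xa, flags);
	drv = xa_load(&demo_userq_xa, doorbell_offset);
	if (drv)
		drv->process(drv);
	xa_unlock_irqrestore(&demo_userq_xa, flags);
}
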
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 9c17df2cf37b..2b81344dcd66 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -33,7 +33,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "hdp/hdp_6_0_0_offset.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15_common.h"
#include "soc15.h"
@@ -42,6 +42,8 @@
#include "sdma_common.h"
#include "sdma_v7_0.h"
#include "v12_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@@ -204,66 +206,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "regSDMA%i_GFX_RB_WPTR == 0x%08x "
- "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -407,11 +382,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -829,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
-static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v7_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/**
@@ -935,6 +908,12 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_mcu_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_mcu_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -961,33 +940,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1000,10 +968,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1015,8 +980,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1039,37 +1003,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1097,10 +1047,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1111,8 +1058,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1312,6 +1258,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");
@@ -1337,16 +1300,24 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
- GFX_11_0_0__SRCID__SDMA_TRAP,
+ GFX_12_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_12_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1366,7 +1337,9 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
@@ -1378,6 +1351,16 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
+
return r;
}
@@ -1400,11 +1383,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v7_0_start(adev);
+ r = sdma_v7_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v7_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1416,6 +1427,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v7_0_ctx_switch_enable(adev, false);
sdma_v7_0_enable(adev, false);
+ sdma_v7_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1430,9 +1442,9 @@ static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v7_0_hw_init(ip_block);
}
-static bool sdma_v7_0_is_idle(void *handle)
+static bool sdma_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1533,25 +1545,9 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1573,6 +1569,29 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1592,7 +1611,7 @@ static int sdma_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v7_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
}
@@ -1710,6 +1729,10 @@ static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = {
.process = sdma_v7_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v7_0_fence_irq_funcs = {
+ .process = sdma_v7_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = {
.process = sdma_v7_0_process_illegal_inst_irq,
};
@@ -1719,6 +1742,7 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs;
}
@@ -1741,11 +1765,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_cm;
max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+ write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
@@ -1762,7 +1787,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
- ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;
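
Both the sdma_v6_0 and sdma_v7_0 early_init hunks apply the same mapping of the amdgpu_user_queue module parameter onto two per-device flags. A minimal illustrative sketch of that mapping, not part of the patch (the helper name and comments are interpretation, only the flag values come from the hunks):

/*
 * amdgpu_user_queue mapping as applied in the early_init hunks:
 *   -1 / 0 (default): user queues disabled (disable_uq = true)
 *    1: user queues enabled alongside kernel-ring submissions
 *    2: user queues enabled, user submissions on kernel rings blocked
 */
static void demo_apply_user_queue_mode(int user_queue_mode,
				       bool *no_user_submission,
				       bool *disable_uq)
{
	switch (user_queue_mode) {
	case 1:
		*no_user_submission = false;
		*disable_uq = false;
		break;
	case 2:
		*no_user_submission = true;
		*disable_uq = false;
		break;
	case -1:
	case 0:
	default:
		*no_user_submission = false;
		*disable_uq = true;
		break;
	}
}
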
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 77ef7da2e4fe..f7288372ee61 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -35,6 +35,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
+
#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
@@ -44,17 +45,31 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
-#include "amdgpu_vkms.h"
+#include "vce_v1_0.h"
+
+#include "uvd/uvd_4_0_d.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
-#include "uvd/uvd_4_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "si_enums.h"
#include "amdgpu_dm.h"
+#include "amdgpu_vkms.h"
static const u32 tahiti_golden_registers[] =
{
@@ -907,9 +922,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
-/* XXX: update when we support VCE */
-#if 0
-/* tahiti, pitcarin, verde */
+/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
{
@@ -926,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
.codec_array = tahiti_video_codecs_encode_array,
};
-#else
-static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
-{
- .codec_count = 0,
- .codec_array = NULL,
-};
-#endif
+
/* oland and hainan don't support encode */
static const struct amdgpu_video_codecs hainan_video_codecs_encode =
{
@@ -940,7 +947,7 @@ static const struct amdgpu_video_codecs hainan_video_codecs_encode =
.codec_array = NULL,
};
-/* tahiti, pitcarin, verde, oland */
+/* tahiti, pitcairn, verde, oland */
static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
{
{
@@ -1071,8 +1078,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- r = RREG32(SMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1082,8 +1089,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- WREG32(SMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -1110,55 +1117,55 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
- {GRBM_STATUS},
+ {mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
- {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
- {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
- {GB_ADDR_CONFIG},
- {MC_ARB_RAMCFG},
- {GB_TILE_MODE0},
- {GB_TILE_MODE1},
- {GB_TILE_MODE2},
- {GB_TILE_MODE3},
- {GB_TILE_MODE4},
- {GB_TILE_MODE5},
- {GB_TILE_MODE6},
- {GB_TILE_MODE7},
- {GB_TILE_MODE8},
- {GB_TILE_MODE9},
- {GB_TILE_MODE10},
- {GB_TILE_MODE11},
- {GB_TILE_MODE12},
- {GB_TILE_MODE13},
- {GB_TILE_MODE14},
- {GB_TILE_MODE15},
- {GB_TILE_MODE16},
- {GB_TILE_MODE17},
- {GB_TILE_MODE18},
- {GB_TILE_MODE19},
- {GB_TILE_MODE20},
- {GB_TILE_MODE21},
- {GB_TILE_MODE22},
- {GB_TILE_MODE23},
- {GB_TILE_MODE24},
- {GB_TILE_MODE25},
- {GB_TILE_MODE26},
- {GB_TILE_MODE27},
- {GB_TILE_MODE28},
- {GB_TILE_MODE29},
- {GB_TILE_MODE30},
- {GB_TILE_MODE31},
- {CC_RB_BACKEND_DISABLE, true},
- {GC_USER_RB_BACKEND_DISABLE, true},
- {PA_SC_RASTER_CONFIG, true},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
+ {mmGB_TILE_MODE0},
+ {mmGB_TILE_MODE1},
+ {mmGB_TILE_MODE2},
+ {mmGB_TILE_MODE3},
+ {mmGB_TILE_MODE4},
+ {mmGB_TILE_MODE5},
+ {mmGB_TILE_MODE6},
+ {mmGB_TILE_MODE7},
+ {mmGB_TILE_MODE8},
+ {mmGB_TILE_MODE9},
+ {mmGB_TILE_MODE10},
+ {mmGB_TILE_MODE11},
+ {mmGB_TILE_MODE12},
+ {mmGB_TILE_MODE13},
+ {mmGB_TILE_MODE14},
+ {mmGB_TILE_MODE15},
+ {mmGB_TILE_MODE16},
+ {mmGB_TILE_MODE17},
+ {mmGB_TILE_MODE18},
+ {mmGB_TILE_MODE19},
+ {mmGB_TILE_MODE20},
+ {mmGB_TILE_MODE21},
+ {mmGB_TILE_MODE22},
+ {mmGB_TILE_MODE23},
+ {mmGB_TILE_MODE24},
+ {mmGB_TILE_MODE25},
+ {mmGB_TILE_MODE26},
+ {mmGB_TILE_MODE27},
+ {mmGB_TILE_MODE28},
+ {mmGB_TILE_MODE29},
+ {mmGB_TILE_MODE30},
+ {mmGB_TILE_MODE31},
+ {mmCC_RB_BACKEND_DISABLE, true},
+ {mmGC_USER_RB_BACKEND_DISABLE, true},
+ {mmPA_SC_RASTER_CONFIG, true},
};
static uint32_t si_get_register_value(struct amdgpu_device *adev,
@@ -1264,37 +1271,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
u32 rom_cntl;
bool r;
- bus_cntl = RREG32(R600_BUS_CNTL);
+ bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
- d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
- d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32(R600_ROM_CNTL);
/* enable the rom */
- WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(VGA_RENDER_CONTROL,
- (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = amdgpu_read_bios(adev);
/* restore regs */
- WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
@@ -1331,23 +1338,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
{
u32 tmp, i;
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK)
break;
udelay(1);
}
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
+ CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
@@ -1358,21 +1366,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
}
static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
@@ -1454,14 +1462,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t temp;
- temp = RREG32(CONFIG_CNTL);
+ temp = RREG32(mmCONFIG_CNTL);
if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
temp &= ~(1<<1);
}
- WREG32(CONFIG_CNTL, temp);
+ WREG32(mmCONFIG_CNTL, temp);
}
static u32 si_get_xclk(struct amdgpu_device *adev)
@@ -1469,12 +1477,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
+ tmp = RREG32(mmCG_CLKPIN_CNTL_2);
+ if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK)
return TCLK;
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
+ tmp = RREG32(mmCG_CLKPIN_CNTL);
+ if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
return reference_clock;
@@ -1519,9 +1527,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
- switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
@@ -1568,13 +1576,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
return;
}
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (LC_RECONFIG_NOW |
- LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
+ link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK);
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
@@ -1888,7 +1896,7 @@ static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
+ DRM_ERROR("Timeout setting VCE clocks!\n");
return -ETIMEDOUT;
}
@@ -1910,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
~VCEPLL_BYPASS_EN_MASK);
if (!evclk || !ecclk) {
+ /*
+ * On some chips, the PLL takes way too long to get out of
+ * sleep mode, causing a timeout waiting on CTLACK/CTLACK2.
+ * Leave the PLL running in bypass mode.
+ */
+ if (adev->pdev->device == 0x6780)
+ return 0;
+
/* Keep the Bypass mode, put PLL to sleep */
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
~VCEPLL_SLEEP_MASK);
@@ -2018,7 +2034,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
{
- return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
@@ -2239,9 +2255,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
@@ -2268,17 +2284,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
@@ -2301,13 +2317,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL2,
&gpu_cfg2);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
@@ -2333,16 +2349,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
}
}
}
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -2354,13 +2370,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS, tmp16);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2434,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2564,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
@@ -2644,7 +2660,7 @@ static int si_common_resume(struct amdgpu_ip_block *ip_block)
return si_common_hw_init(ip_block);
}
-static bool si_common_is_idle(void *handle)
+static bool si_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -2702,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block);
break;
case CHIP_OLAND:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
@@ -2720,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
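The si.c hunks above replace the hand-rolled sid.h macros (FIELD(x)/FIELD_MASK and bare offsets) with the generated mm<REG>, <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT names. Below is a minimal, self-contained userspace sketch of that read-modify-write pattern, not driver code: the fake_mmio array, the local RREG32/WREG32 wrappers and main() are illustrative stand-ins, and the THM_CLK_CNTL offset and field values mirror the sid.h defines removed further down (0x19b, mask 0xff/shift 0 for CMON, 0xff00/shift 8 for TMON) rather than the real smu_6_0 headers.

#include <stdint.h>
#include <stdio.h>

#define mmTHM_CLK_CNTL                     0x019b   /* offset taken from the removed sid.h value */
#define THM_CLK_CNTL__CMON_CLK_SEL_MASK    0x000000ff
#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT  0x0
#define THM_CLK_CNTL__TMON_CLK_SEL_MASK    0x0000ff00
#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT  0x8

static uint32_t fake_mmio[0x1000];                  /* stand-in for the real MMIO aperture */
#define RREG32(reg)     (fake_mmio[(reg)])
#define WREG32(reg, v)  (fake_mmio[(reg)] = (v))

int main(void)
{
	uint32_t orig, data;

	/* read, clear both fields, program value 1 via the __SHIFT names */
	orig = data = RREG32(mmTHM_CLK_CNTL);
	data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
	data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
		(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
	if (orig != data)
		WREG32(mmTHM_CLK_CNTL, data);

	printf("THM_CLK_CNTL after update: 0x%08x\n", RREG32(mmTHM_CLK_CNTL));
	return 0;
}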
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index dbd78d5345a4..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -541,13 +557,13 @@ static int si_dma_resume(struct amdgpu_ip_block *ip_block)
return si_dma_hw_init(ip_block);
}
-static bool si_dma_is_idle(void *handle)
+static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -559,7 +575,7 @@ static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_dma_is_idle(adev))
+ if (si_dma_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index 4e935baa7b91..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,123 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-#define SI_CRTC0_REGISTER_OFFSET 0
-#define SI_CRTC1_REGISTER_OFFSET 0x300
-#define SI_CRTC2_REGISTER_OFFSET 0x2600
-#define SI_CRTC3_REGISTER_OFFSET 0x2900
-#define SI_CRTC4_REGISTER_OFFSET 0x2c00
-#define SI_CRTC5_REGISTER_OFFSET 0x2f00
-
-#define DMA0_REGISTER_OFFSET 0x000
-#define DMA1_REGISTER_OFFSET 0x200
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -148,144 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
-
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index a32b6243c1f8..66f650f87243 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -95,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
si_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -111,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
@@ -126,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -174,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
return amdgpu_irq_init(adev);
}
@@ -210,10 +223,10 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
return si_ih_hw_init(ip_block);
}
-static bool si_ih_is_idle(void *handle)
+static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(SRBM_STATUS);
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -227,7 +240,7 @@ static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_ih_is_idle(adev))
+ if (si_ih_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -239,23 +252,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
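The si_ih.c hunks add a software IH ring: sw_init allocates adev->irq.ih_soft, irq_init marks it enabled, and get_wptr returns the soft ring's wptr directly, bypassing the hardware overflow handling. The sketch below illustrates only that early-out flow; struct ih_ring, the is_soft flag, the overflow bit value and the "+16" recovery step are simplified stand-ins, not the driver's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IH_RB_WPTR__RB_OVERFLOW_MASK  (1 << 0)          /* stand-in bit position */

/* Simplified stand-in for struct amdgpu_ih_ring. */
struct ih_ring {
	uint32_t wptr_cpu;      /* write pointer written back into memory */
	uint32_t rptr;
	uint32_t ptr_mask;
	bool     is_soft;       /* stands in for "ih == &adev->irq.ih_soft" */
};

static uint32_t ih_get_wptr(struct ih_ring *ih)
{
	uint32_t wptr = ih->wptr_cpu;

	/* The software ring has no hardware overflow flag: return it as-is. */
	if (ih->is_soft)
		goto out;

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* skip past the overwritten entries (illustrative recovery) */
		ih->rptr = (wptr + 16) & ih->ptr_mask;
	}
out:
	return wptr & ih->ptr_mask;
}

int main(void)
{
	struct ih_ring soft = { .wptr_cpu = 0x40, .ptr_mask = 0xff, .is_soft = true };
	struct ih_ring hw   = { .wptr_cpu = 0x41, .ptr_mask = 0xff, .is_soft = false };

	printf("soft wptr 0x%02x, hw wptr 0x%02x (rptr 0x%02x)\n",
	       ih_get_wptr(&soft), ih_get_wptr(&hw), hw.rptr);
	return 0;
}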
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index 9a39cbfe6db9..561462a8332e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,47 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -72,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -83,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -153,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -495,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -635,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -678,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -687,734 +273,28 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_ENDPOINT_INDEX 0x1780
-# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
-# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
-#define AZ_F0_CODEC_ENDPOINT_DATA 0x1781
-
-#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
-#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
-#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
-#define SPEAKER_ALLOCATION_SHIFT 0
-#define HDMI_CONNECTION (1 << 16)
-#define DP_CONNECTION (1 << 17)
-
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO_SOURCE 0x05ac
-# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
-# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
-
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define AFMT_AUDIO_SRC_CONTROL 0x1c4f
-#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
-/* AFMT_AUDIO_SRC_SELECT
- * 0 = stream0
- * 1 = stream1
- * 2 = stream2
- * 3 = stream3
- * 4 = stream4
- * 5 = stream5
- */
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_SOFT_RESET 0x2008
-#define SOFT_RESET_CP (1 << 0)
-#define SOFT_RESET_CB (1 << 1)
-#define SOFT_RESET_RLC (1 << 2)
-#define SOFT_RESET_DB (1 << 3)
-#define SOFT_RESET_GDS (1 << 4)
-#define SOFT_RESET_PA (1 << 5)
-#define SOFT_RESET_SC (1 << 6)
-#define SOFT_RESET_BCI (1 << 7)
-#define SOFT_RESET_SPI (1 << 8)
-#define SOFT_RESET_SX (1 << 10)
-#define SOFT_RESET_TC (1 << 11)
-#define SOFT_RESET_TA (1 << 12)
-#define SOFT_RESET_VGT (1 << 14)
-#define SOFT_RESET_IA (1 << 15)
-
-#define GRBM_GFX_INDEX 0x200B
-#define INSTANCE_INDEX(x) ((x) << 0)
-#define SH_INDEX(x) ((x) << 8)
-#define SE_INDEX(x) ((x) << 16)
-#define SH_BROADCAST_WRITES (1 << 29)
-#define INSTANCE_BROADCAST_WRITES (1 << 30)
-#define SE_BROADCAST_WRITES (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
-#define VGT_CACHE_INVALIDATION 0x2231
-#define CACHE_INVALIDATION(x) ((x) << 0)
-#define VC_ONLY 0
-#define TC_ONLY 1
-#define VC_AND_TC 2
-#define AUTO_INVLD_EN(x) ((x) << 6)
-#define NO_AUTO 0
-#define ES_AUTO 1
-#define GS_AUTO 2
-#define ES_AND_GS_AUTO 3
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define CC_GC_SHADER_ARRAY_CONFIG 0x226F
-#define INACTIVE_CUS_MASK 0xFFFF0000
-#define INACTIVE_CUS_SHIFT 16
-#define GC_USER_SHADER_ARRAY_CONFIG 0x2270
-
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define GB_TILE_MODE0 0x2644
-# define MICRO_TILE_MODE(x) ((x) << 0)
-# define ADDR_SURF_DISPLAY_MICRO_TILING 0
-# define ADDR_SURF_THIN_MICRO_TILING 1
-# define ADDR_SURF_DEPTH_MICRO_TILING 2
-# define ARRAY_MODE(x) ((x) << 2)
-# define ARRAY_LINEAR_GENERAL 0
-# define ARRAY_LINEAR_ALIGNED 1
-# define ARRAY_1D_TILED_THIN1 2
-# define ARRAY_2D_TILED_THIN1 4
-# define PIPE_CONFIG(x) ((x) << 6)
-# define ADDR_SURF_P2 0
-# define ADDR_SURF_P4_8x16 4
-# define ADDR_SURF_P4_16x16 5
-# define ADDR_SURF_P4_16x32 6
-# define ADDR_SURF_P4_32x32 7
-# define ADDR_SURF_P8_16x16_8x16 8
-# define ADDR_SURF_P8_16x32_8x16 9
-# define ADDR_SURF_P8_32x32_8x16 10
-# define ADDR_SURF_P8_16x32_16x16 11
-# define ADDR_SURF_P8_32x32_16x16 12
-# define ADDR_SURF_P8_32x32_16x32 13
-# define ADDR_SURF_P8_32x64_32x32 14
-# define TILE_SPLIT(x) ((x) << 11)
-# define ADDR_SURF_TILE_SPLIT_64B 0
-# define ADDR_SURF_TILE_SPLIT_128B 1
-# define ADDR_SURF_TILE_SPLIT_256B 2
-# define ADDR_SURF_TILE_SPLIT_512B 3
-# define ADDR_SURF_TILE_SPLIT_1KB 4
-# define ADDR_SURF_TILE_SPLIT_2KB 5
-# define ADDR_SURF_TILE_SPLIT_4KB 6
-# define BANK_WIDTH(x) ((x) << 14)
-# define ADDR_SURF_BANK_WIDTH_1 0
-# define ADDR_SURF_BANK_WIDTH_2 1
-# define ADDR_SURF_BANK_WIDTH_4 2
-# define ADDR_SURF_BANK_WIDTH_8 3
-# define BANK_HEIGHT(x) ((x) << 16)
-# define ADDR_SURF_BANK_HEIGHT_1 0
-# define ADDR_SURF_BANK_HEIGHT_2 1
-# define ADDR_SURF_BANK_HEIGHT_4 2
-# define ADDR_SURF_BANK_HEIGHT_8 3
-# define MACRO_TILE_ASPECT(x) ((x) << 18)
-# define ADDR_SURF_MACRO_ASPECT_1 0
-# define ADDR_SURF_MACRO_ASPECT_2 1
-# define ADDR_SURF_MACRO_ASPECT_4 2
-# define ADDR_SURF_MACRO_ASPECT_8 3
-# define NUM_BANKS(x) ((x) << 20)
-# define ADDR_SURF_2_BANK 0
-# define ADDR_SURF_4_BANK 1
-# define ADDR_SURF_8_BANK 2
-# define ADDR_SURF_16_BANK 3
-#define GB_TILE_MODE1 0x2645
-#define GB_TILE_MODE2 0x2646
-#define GB_TILE_MODE3 0x2647
-#define GB_TILE_MODE4 0x2648
-#define GB_TILE_MODE5 0x2649
-#define GB_TILE_MODE6 0x264a
-#define GB_TILE_MODE7 0x264b
-#define GB_TILE_MODE8 0x264c
-#define GB_TILE_MODE9 0x264d
-#define GB_TILE_MODE10 0x264e
-#define GB_TILE_MODE11 0x264f
-#define GB_TILE_MODE12 0x2650
-#define GB_TILE_MODE13 0x2651
-#define GB_TILE_MODE14 0x2652
-#define GB_TILE_MODE15 0x2653
-#define GB_TILE_MODE16 0x2654
-#define GB_TILE_MODE17 0x2655
-#define GB_TILE_MODE18 0x2656
-#define GB_TILE_MODE19 0x2657
-#define GB_TILE_MODE20 0x2658
-#define GB_TILE_MODE21 0x2659
-#define GB_TILE_MODE22 0x265a
-#define GB_TILE_MODE23 0x265b
-#define GB_TILE_MODE24 0x265c
-#define GB_TILE_MODE25 0x265d
-#define GB_TILE_MODE26 0x265e
-#define GB_TILE_MODE27 0x265f
-#define GB_TILE_MODE28 0x2660
-#define GB_TILE_MODE29 0x2661
-#define GB_TILE_MODE30 0x2662
-#define GB_TILE_MODE31 0x2663
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define GC_USER_RB_BACKEND_DISABLE 0x26DF
-#define BACKEND_DISABLE_MASK 0x00FF0000
-#define BACKEND_DISABLE_SHIFT 16
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
-#define CP_MEM_SLP_CNTL 0x3079
-# define CP_MEM_LS_EN (1 << 0)
-
-#define CP_DEBUG 0x307F
-
-#define RLC_CNTL 0x30C0
-# define RLC_ENABLE (1 << 0)
-#define RLC_RL_BASE 0x30C1
-#define RLC_RL_SIZE 0x30C2
-#define RLC_LB_CNTL 0x30C3
-# define LOAD_BALANCE_ENABLE (1 << 0)
-#define RLC_SAVE_AND_RESTORE_BASE 0x30C4
-#define RLC_LB_CNTR_MAX 0x30C5
-#define RLC_LB_CNTR_INIT 0x30C6
-
-#define RLC_CLEAR_STATE_RESTORE_BASE 0x30C8
-
-#define RLC_UCODE_ADDR 0x30CB
-#define RLC_UCODE_DATA 0x30CC
-
-#define RLC_GPU_CLOCK_COUNT_LSB 0x30CE
-#define RLC_GPU_CLOCK_COUNT_MSB 0x30CF
-#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
-#define RLC_MC_CNTL 0x30D1
-#define RLC_UCODE_CNTL 0x30D2
-#define RLC_STAT 0x30D3
-# define RLC_BUSY_STATUS (1 << 0)
-# define GFX_POWER_STATUS (1 << 1)
-# define GFX_CLOCK_STATUS (1 << 2)
-# define GFX_LS_STATUS (1 << 3)
-
-#define RLC_PG_CNTL 0x30D7
-# define GFX_PG_ENABLE (1 << 0)
-# define GFX_PG_SRC (1 << 1)
-
-#define RLC_CGTT_MGCG_OVERRIDE 0x3100
-#define RLC_CGCG_CGLS_CTRL 0x3101
-# define CGCG_EN (1 << 0)
-# define CGLS_EN (1 << 1)
-
-#define RLC_TTOP_D 0x3105
-# define RLC_PUD(x) ((x) << 0)
-# define RLC_PUD_MASK (0xff << 0)
-# define RLC_PDD(x) ((x) << 8)
-# define RLC_PDD_MASK (0xff << 8)
-# define RLC_TTPD(x) ((x) << 16)
-# define RLC_TTPD_MASK (0xff << 16)
-# define RLC_MSD(x) ((x) << 24)
-# define RLC_MSD_MASK (0xff << 24)
-
-#define RLC_LB_INIT_CU_MASK 0x3107
-
-#define RLC_PG_AO_CU_MASK 0x310B
-#define RLC_MAX_PG_CU 0x310C
-# define MAX_PU_CU(x) ((x) << 0)
-# define MAX_PU_CU_MASK (0xff << 0)
-#define RLC_AUTO_PG_CTRL 0x310C
-# define AUTO_PG_EN (1 << 0)
-# define GRBM_REG_SGIT(x) ((x) << 3)
-# define GRBM_REG_SGIT_MASK (0xffff << 3)
-# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
-# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
-
-#define RLC_SERDES_WR_MASTER_MASK_0 0x3115
-#define RLC_SERDES_WR_MASTER_MASK_1 0x3116
-#define RLC_SERDES_WR_CTRL 0x3117
-
-#define RLC_SERDES_MASTER_BUSY_0 0x3119
-#define RLC_SERDES_MASTER_BUSY_1 0x311A
-
-#define RLC_GCPM_GENERAL_3 0x311E
-
-#define DB_RENDER_CONTROL 0xA000
-
-#define DB_DEPTH_INFO 0xA00F
-
-#define PA_SC_RASTER_CONFIG 0xA0D4
-# define RB_MAP_PKR0(x) ((x) << 0)
-# define RB_MAP_PKR0_MASK (0x3 << 0)
-# define RB_MAP_PKR1(x) ((x) << 2)
-# define RB_MAP_PKR1_MASK (0x3 << 2)
-# define RASTER_CONFIG_RB_MAP_0 0
-# define RASTER_CONFIG_RB_MAP_1 1
-# define RASTER_CONFIG_RB_MAP_2 2
-# define RASTER_CONFIG_RB_MAP_3 3
+// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
# define RB_XSEL (1 << 6)
# define RB_YSEL (1 << 7)
# define PKR_MAP(x) ((x) << 8)
-# define PKR_MAP_MASK (0x3 << 8)
-# define RASTER_CONFIG_PKR_MAP_0 0
-# define RASTER_CONFIG_PKR_MAP_1 1
-# define RASTER_CONFIG_PKR_MAP_2 2
-# define RASTER_CONFIG_PKR_MAP_3 3
# define PKR_XSEL(x) ((x) << 10)
# define PKR_XSEL_MASK (0x3 << 10)
# define PKR_YSEL(x) ((x) << 12)
@@ -1426,221 +306,19 @@
# define SC_YSEL(x) ((x) << 20)
# define SC_YSEL_MASK (0x3 << 20)
# define SE_MAP(x) ((x) << 24)
-# define SE_MAP_MASK (0x3 << 24)
-# define RASTER_CONFIG_SE_MAP_0 0
-# define RASTER_CONFIG_SE_MAP_1 1
-# define RASTER_CONFIG_SE_MAP_2 2
-# define RASTER_CONFIG_SE_MAP_3 3
# define SE_XSEL(x) ((x) << 26)
# define SE_XSEL_MASK (0x3 << 26)
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-
-#define VGT_EVENT_INITIATOR 0xA2A4
-# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
-# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
-# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
-# define CACHE_FLUSH_TS (4 << 0)
-# define CACHE_FLUSH (6 << 0)
-# define CS_PARTIAL_FLUSH (7 << 0)
-# define VGT_STREAMOUT_RESET (10 << 0)
-# define END_OF_PIPE_INCR_DE (11 << 0)
-# define END_OF_PIPE_IB_END (12 << 0)
-# define RST_PIX_CNT (13 << 0)
-# define VS_PARTIAL_FLUSH (15 << 0)
-# define PS_PARTIAL_FLUSH (16 << 0)
-# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
-# define ZPASS_DONE (21 << 0)
-# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
-# define PERFCOUNTER_START (23 << 0)
-# define PERFCOUNTER_STOP (24 << 0)
-# define PIPELINESTAT_START (25 << 0)
-# define PIPELINESTAT_STOP (26 << 0)
-# define PERFCOUNTER_SAMPLE (27 << 0)
-# define SAMPLE_PIPELINESTAT (30 << 0)
-# define SAMPLE_STREAMOUTSTATS (32 << 0)
-# define RESET_VTX_CNT (33 << 0)
-# define VGT_FLUSH (36 << 0)
-# define BOTTOM_OF_PIPE_TS (40 << 0)
-# define DB_CACHE_FLUSH_AND_INV (42 << 0)
-# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
-# define FLUSH_AND_INV_DB_META (44 << 0)
-# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
-# define FLUSH_AND_INV_CB_META (46 << 0)
-# define CS_DONE (47 << 0)
-# define PS_DONE (48 << 0)
-# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
-# define THREAD_TRACE_START (51 << 0)
-# define THREAD_TRACE_STOP (52 << 0)
-# define THREAD_TRACE_FLUSH (54 << 0)
-# define THREAD_TRACE_FINISH (55 << 0)
-
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1874,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
-# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define DMA_RB_BASE 0x3401
-#define DMA_RB_RPTR 0x3402
-#define DMA_RB_WPTR 0x3403
-
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-
-#define DMA_IB_CNTL 0x3409
-# define DMA_IB_ENABLE (1 << 0)
-# define DMA_IB_SWAP_ENABLE (1 << 4)
-# define CMD_VMID_FORCE (1 << 31)
-#define DMA_IB_RPTR 0x340a
-#define DMA_CNTL 0x340b
-# define TRAP_ENABLE (1 << 0)
-# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
-# define SEM_WAIT_INT_ENABLE (1 << 2)
-# define DATA_SWAP_ENABLE (1 << 3)
-# define FENCE_SWAP_ENABLE (1 << 4)
-# define CTXEMPTY_INT_ENABLE (1 << 28)
-#define DMA_STATUS_REG 0x340d
-# define DMA_IDLE (1 << 0)
-#define DMA_TILING_CONFIG 0x342e
-
-#define DMA_POWER_CNTL 0x342f
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0x3430
-
-#define DMA_PG 0x3435
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0x3436
-#define DMA_PGFSM_WRITE 0x3437
+#define SDMA_MAX_INSTANCE 2
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
@@ -1941,45 +581,7 @@
#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
-#define VCE_STATUS 0x20004
-#define VCE_VCPU_CNTL 0x20014
-#define VCE_CLK_EN (1 << 0)
-#define VCE_VCPU_CACHE_OFFSET0 0x20024
-#define VCE_VCPU_CACHE_SIZE0 0x20028
-#define VCE_VCPU_CACHE_OFFSET1 0x2002c
-#define VCE_VCPU_CACHE_SIZE1 0x20030
-#define VCE_VCPU_CACHE_OFFSET2 0x20034
-#define VCE_VCPU_CACHE_SIZE2 0x20038
-#define VCE_SOFT_RESET 0x20120
-#define VCE_ECPU_SOFT_RESET (1 << 0)
-#define VCE_FME_SOFT_RESET (1 << 2)
-#define VCE_RB_BASE_LO2 0x2016c
-#define VCE_RB_BASE_HI2 0x20170
-#define VCE_RB_SIZE2 0x20174
-#define VCE_RB_RPTR2 0x20178
-#define VCE_RB_WPTR2 0x2017c
-#define VCE_RB_BASE_LO 0x20180
-#define VCE_RB_BASE_HI 0x20184
-#define VCE_RB_SIZE 0x20188
-#define VCE_RB_RPTR 0x2018c
-#define VCE_RB_WPTR 0x20190
-#define VCE_CLOCK_GATING_A 0x202f8
-#define VCE_CLOCK_GATING_B 0x202fc
-#define VCE_UENC_CLOCK_GATING 0x205bc
-#define VCE_UENC_REG_CLOCK_GATING 0x205c0
-#define VCE_FW_REG_STATUS 0x20e10
-# define VCE_FW_REG_STATUS_BUSY (1 << 0)
-# define VCE_FW_REG_STATUS_PASS (1 << 3)
-# define VCE_FW_REG_STATUS_DONE (1 << 11)
-#define VCE_LMI_FW_START_KEYSEL 0x20e18
-#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
-#define VCE_LMI_CTRL2 0x20e74
-#define VCE_LMI_CTRL 0x20e98
-#define VCE_LMI_VM_CTRL 0x20ea0
-#define VCE_LMI_SWAP_CNTL 0x20eb4
-#define VCE_LMI_SWAP_CNTL1 0x20eb8
-#define VCE_LMI_CACHE_CTRL 0x20ef4
-
+/* VCE */
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
#define VCE_CMD_IB 0x00000002
@@ -1988,434 +590,146 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
-
//#dce stupp
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
-#define SI_CRTC0_REGISTER_OFFSET 0 //(0x6df0 - 0x6df0)/4
-#define SI_CRTC1_REGISTER_OFFSET 0x300 //(0x79f0 - 0x6df0)/4
-#define SI_CRTC2_REGISTER_OFFSET 0x2600 //(0x105f0 - 0x6df0)/4
-#define SI_CRTC3_REGISTER_OFFSET 0x2900 //(0x111f0 - 0x6df0)/4
-#define SI_CRTC4_REGISTER_OFFSET 0x2c00 //(0x11df0 - 0x6df0)/4
-#define SI_CRTC5_REGISTER_OFFSET 0x2f00 //(0x129f0 - 0x6df0)/4
+#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4
+#define CRTC1_REGISTER_OFFSET (0x1e7c - 0x1b7c) //(0x79f0 - 0x6df0)/4
+#define CRTC2_REGISTER_OFFSET (0x417c - 0x1b7c) //(0x105f0 - 0x6df0)/4
+#define CRTC3_REGISTER_OFFSET (0x447c - 0x1b7c) //(0x111f0 - 0x6df0)/4
+#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) //(0x11df0 - 0x6df0)/4
+#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) //(0x129f0 - 0x6df0)/4
+
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define AMDGPU_MM_INDEX 0x0000
-#define AMDGPU_MM_DATA 0x0001
-
-#define VERDE_NUM_CRTC 6
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define EVERGREEN_DATA_FORMAT 0x1ac0
-# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-
-#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
-#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
-
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
-#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
-#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
-
-#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
-#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
-
-#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
-#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-
-#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
-
-#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
-
-#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
-
-#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
-#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
-
-#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
-
-#define R600_D1GRPH_SWAP_CONTROL 0x1843
-#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
-
-#define AVIVO_D1VGA_CONTROL 0x00cc
-# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
-# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
-# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
-#define AVIVO_D2VGA_CONTROL 0x00ce
-
-#define R600_BUS_CNTL 0x1508
-# define R600_BIOS_ROM_DIS (1 << 1)
+
#define R600_ROM_CNTL 0x580
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
-
-#define FMT_BIT_DEPTH_CONTROL 0x1bf2
-#define FMT_TRUNCATE_EN (1 << 0)
-#define FMT_TRUNCATE_DEPTH (1 << 4)
-#define FMT_SPATIAL_DITHER_EN (1 << 8)
-#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-#define FMT_RGB_RANDOM_ENABLE (1 << 14)
-#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-#define FMT_TEMPORAL_DITHER_EN (1 << 16)
-#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-#define FMT_TEMPORAL_LEVEL (1 << 24)
-#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-#define FMT_25FRC_SEL(x) ((x) << 26)
-#define FMT_50FRC_SEL(x) ((x) << 28)
-#define FMT_75FRC_SEL(x) ((x) << 30)
-
-#define EVERGREEN_DC_LUT_CONTROL 0x1a80
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
-#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
-#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
-#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
-#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
-
-#define EVERGREEN_GRPH_ENABLE 0x1a00
-#define EVERGREEN_GRPH_CONTROL 0x1a01
-#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define EVERGREEN_GRPH_DEPTH_8BPP 0
-#define EVERGREEN_GRPH_DEPTH_16BPP 1
-#define EVERGREEN_GRPH_DEPTH_32BPP 2
-#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define EVERGREEN_ADDR_SURF_2_BANK 0
-#define EVERGREEN_ADDR_SURF_4_BANK 1
-#define EVERGREEN_ADDR_SURF_8_BANK 2
-#define EVERGREEN_ADDR_SURF_16_BANK 3
-#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
-#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-
-#define EVERGREEN_GRPH_FORMAT_INDEXED 0
-#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
-#define EVERGREEN_GRPH_FORMAT_ARGB565 1
-#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
-#define EVERGREEN_GRPH_FORMAT_AI88 3
-#define EVERGREEN_GRPH_FORMAT_MONO16 4
-#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define ES_AND_GS_AUTO 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+/* 8 BPP */
+#define GRPH_FORMAT_INDEXED 0
+
+/* 16 BPP */
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
-#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
-#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
-#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
-#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
-#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
-#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
-#define EVERGREEN_GRPH_FORMAT_RGB111110 6
-#define EVERGREEN_GRPH_FORMAT_BGR101111 7
-#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
-#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
-#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
-#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-
-#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
-#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-# define EVERGREEN_GRPH_ENDIAN_NONE 0
-# define EVERGREEN_GRPH_ENDIAN_8IN16 1
-# define EVERGREEN_GRPH_ENDIAN_8IN32 2
-# define EVERGREEN_GRPH_ENDIAN_8IN64 3
-#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_RED_SEL_R 0
-# define EVERGREEN_GRPH_RED_SEL_G 1
-# define EVERGREEN_GRPH_RED_SEL_B 2
-# define EVERGREEN_GRPH_RED_SEL_A 3
-#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-# define EVERGREEN_GRPH_GREEN_SEL_G 0
-# define EVERGREEN_GRPH_GREEN_SEL_B 1
-# define EVERGREEN_GRPH_GREEN_SEL_A 2
-# define EVERGREEN_GRPH_GREEN_SEL_R 3
-#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-# define EVERGREEN_GRPH_BLUE_SEL_B 0
-# define EVERGREEN_GRPH_BLUE_SEL_A 1
-# define EVERGREEN_GRPH_BLUE_SEL_R 2
-# define EVERGREEN_GRPH_BLUE_SEL_G 3
-#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-# define EVERGREEN_GRPH_ALPHA_SEL_A 0
-# define EVERGREEN_GRPH_ALPHA_SEL_R 1
-# define EVERGREEN_GRPH_ALPHA_SEL_G 2
-# define EVERGREEN_GRPH_ALPHA_SEL_B 3
-
-#define EVERGREEN_D3VGA_CONTROL 0xf8
-#define EVERGREEN_D4VGA_CONTROL 0xf9
-#define EVERGREEN_D5VGA_CONTROL 0xfa
-#define EVERGREEN_D6VGA_CONTROL 0xfb
-
-#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
-
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
-#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
-
-#define EVERGREEN_GRPH_PITCH 0x1a06
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
-#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
-#define EVERGREEN_GRPH_X_START 0x1a0b
-#define EVERGREEN_GRPH_Y_START 0x1a0c
-#define EVERGREEN_GRPH_X_END 0x1a0d
-#define EVERGREEN_GRPH_Y_END 0x1a0e
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
-#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
-
-#define EVERGREEN_VIEWPORT_START 0x1b5c
-#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
-#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
-
-/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
-#define EVERGREEN_CUR_CONTROL 0x1a66
-# define EVERGREEN_CURSOR_EN (1 << 0)
-# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
-# define EVERGREEN_CURSOR_MONO 0
-# define EVERGREEN_CURSOR_24_1 1
-# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
-# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
-# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
-# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
-# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
-# define EVERGREEN_CURSOR_URGENT_1_8 1
-# define EVERGREEN_CURSOR_URGENT_1_4 2
-# define EVERGREEN_CURSOR_URGENT_3_8 3
-# define EVERGREEN_CURSOR_URGENT_1_2 4
-#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
-# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
-#define EVERGREEN_CUR_SIZE 0x1a68
-#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
-#define EVERGREEN_CUR_POSITION 0x1a6a
-#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
-#define EVERGREEN_CUR_COLOR1 0x1a6c
-#define EVERGREEN_CUR_COLOR2 0x1a6d
-#define EVERGREEN_CUR_UPDATE 0x1a6e
-# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
-# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
-# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
-# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define NI_INPUT_CSC_CONTROL 0x1a35
-# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_CSC_BYPASS 0
-# define NI_INPUT_CSC_PROG_COEFF 1
-# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
-# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_OUTPUT_CSC_CONTROL 0x1a3c
-# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
-# define NI_OUTPUT_CSC_BYPASS 0
-# define NI_OUTPUT_CSC_TV_RGB 1
-# define NI_OUTPUT_CSC_YCBCR_601 2
-# define NI_OUTPUT_CSC_YCBCR_709 3
-# define NI_OUTPUT_CSC_PROG_COEFF 4
-# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
-# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
-
-#define NI_DEGAMMA_CONTROL 0x1a58
-# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_DEGAMMA_BYPASS 0
-# define NI_DEGAMMA_SRGB_24 1
-# define NI_DEGAMMA_XVYCC_222 2
-# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
-# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
-
-#define NI_GAMUT_REMAP_CONTROL 0x1a59
-# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
-# define NI_GAMUT_REMAP_BYPASS 0
-# define NI_GAMUT_REMAP_PROG_COEFF 1
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
-# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_REGAMMA_CONTROL 0x1aa0
-# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
-# define NI_REGAMMA_BYPASS 0
-# define NI_REGAMMA_SRGB_24 1
-# define NI_REGAMMA_XVYCC_222 2
-# define NI_REGAMMA_PROG_A 3
-# define NI_REGAMMA_PROG_B 4
-# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-
-
-#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
-# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
-
-#define NI_PRESCALE_OVL_CONTROL 0x1a31
-# define NI_OVL_PRESCALE_BYPASS (1 << 4)
-
-#define NI_INPUT_GAMMA_CONTROL 0x1a10
-# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_GAMMA_USE_LUT 0
-# define NI_INPUT_GAMMA_BYPASS 1
-# define NI_INPUT_GAMMA_SRGB_24 2
-# define NI_INPUT_GAMMA_XVYCC_222 3
-# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
-
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
-
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
-
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
-
-#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
-#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
-#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
-#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
-
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
-
-#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
-#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
-#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
-#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
-#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
-#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
-#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
-#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
-#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
-#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
-#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
-#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+#define GRPH_RED_SEL_R 0
+#define GRPH_RED_SEL_G 1
+#define GRPH_RED_SEL_B 2
+#define GRPH_RED_SEL_A 3
+
+#define GRPH_GREEN_SEL_G 0
+#define GRPH_GREEN_SEL_B 1
+#define GRPH_GREEN_SEL_A 2
+#define GRPH_GREEN_SEL_R 3
+
+#define GRPH_BLUE_SEL_B 0
+#define GRPH_BLUE_SEL_A 1
+#define GRPH_BLUE_SEL_R 2
+#define GRPH_BLUE_SEL_G 3
+
+#define GRPH_ALPHA_SEL_A 0
+#define GRPH_ALPHA_SEL_R 1
+#define GRPH_ALPHA_SEL_G 2
+#define GRPH_ALPHA_SEL_B 3
+
+/* CUR_CONTROL */
+ #define CURSOR_MONO 0
+ #define CURSOR_24_1 1
+ #define CURSOR_24_8_PRE_MULT 2
+ #define CURSOR_24_8_UNPRE_MULT 3
+ #define CURSOR_URGENT_ALWAYS 0
+ #define CURSOR_URGENT_1_8 1
+ #define CURSOR_URGENT_1_4 2
+ #define CURSOR_URGENT_3_8 3
+ #define CURSOR_URGENT_1_2 4
+
+/* INPUT_CSC_CONTROL */
+# define INPUT_CSC_BYPASS 0
+# define INPUT_CSC_PROG_COEFF 1
+# define INPUT_CSC_PROG_SHARED_MATRIXA 2
+
+/* OUTPUT_CSC_CONTROL */
+# define OUTPUT_CSC_BYPASS 0
+# define OUTPUT_CSC_TV_RGB 1
+# define OUTPUT_CSC_YCBCR_601 2
+# define OUTPUT_CSC_YCBCR_709 3
+# define OUTPUT_CSC_PROG_COEFF 4
+# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+
+/* DEGAMMA_CONTROL */
+# define DEGAMMA_BYPASS 0
+# define DEGAMMA_SRGB_24 1
+# define DEGAMMA_XVYCC_222 2
+
+/* GAMUT_REMAP_CONTROL */
+# define GAMUT_REMAP_BYPASS 0
+# define GAMUT_REMAP_PROG_COEFF 1
+# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+
+/* REGAMMA_CONTROL */
+# define REGAMMA_BYPASS 0
+# define REGAMMA_SRGB_24 1
+# define REGAMMA_XVYCC_222 2
+# define REGAMMA_PROG_A 3
+# define REGAMMA_PROG_B 4
+
+
+/* INPUT_GAMMA_CONTROL */
+# define INPUT_GAMMA_USE_LUT 0
+# define INPUT_GAMMA_BYPASS 1
+# define INPUT_GAMMA_SRGB_24 2
+# define INPUT_GAMMA_XVYCC_222 3
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -2426,28 +740,14 @@
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
-#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
-#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-#define CONFIG_CNTL 0x1509
-#define CC_DRM_ID_STRAPS 0X1559
#define AMDGPU_PCIE_INDEX 0xc
#define AMDGPU_PCIE_DATA 0xd
-#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
-#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
-#define DMA_MODE 0x342f
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-#define DMA_BUSY_MASK 0x20
-#define DMA1_BUSY_MASK 0X40
-#define SDMA_MAX_INSTANCE 2
-
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
#define PCIE_PORT_INDEX 0xe
@@ -2457,8 +757,6 @@
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define MC_VM_FB_OFFSET 0x81a
-
/* Discrete VCE clocks */
#define CG_VCEPLL_FUNC_CNTL 0xc0030600
#define VCEPLL_RESET_MASK 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index dd2d66090d23..68aef47254a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
- struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
-
- i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
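
The smu_v11_0_i2c.c hunks above move the RAS/FRU EEPROM bus over to device-managed registration. As a rough sketch of the pattern only (the helper and names below are illustrative, not the amdgpu implementation), devm_i2c_add_adapter() ties the adapter to the lifetime of the owning struct device, which is why the explicit i2c_del_adapter() call can be dropped from the fini path:

/*
 * Illustrative only -- not the amdgpu code. Assumes the caller has
 * already set adap->algo before registration.
 */
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_register_hw_i2c(struct device *dev, struct i2c_adapter *adap)
{
	int res;

	adap->owner = THIS_MODULE;
	adap->dev.parent = dev;
	strscpy(adap->name, "example hw i2c", sizeof(adap->name));

	/* Lifetime is tied to 'dev'; no matching i2c_del_adapter() is needed. */
	res = devm_i2c_add_adapter(dev, adap);
	if (res)
		dev_err(dev, "failed to register hw i2c, err: %d\n", res);

	return res;
}
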
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a59b4c36cad7..42f5d9c0e3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -28,7 +28,6 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -103,12 +102,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +118,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +136,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -586,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
+ if (adev->pcie_reset_ctx.in_link_reset)
+ return AMD_RESET_METHOD_LINK;
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
@@ -605,12 +605,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
/* Will reset for the following suspend abort cases.
- * 1) Only reset on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend aborted in the normal S3 suspend or
- * performing pm core test.
+ * 1) S3 suspend aborted in the normal S3 suspend
+ * 2) S3 suspend aborted in performing pm core test.
*/
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !pm_resume_via_firmware())
+ if (adev->in_s3 && !pm_resume_via_firmware())
return true;
else
return false;
@@ -644,6 +642,9 @@ asic_reset:
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
+ case AMD_RESET_METHOD_LINK:
+ dev_info(adev->dev, "Link reset\n");
+ return amdgpu_device_link_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
@@ -740,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request request full access for sriov before
* set_ip_blocks. */
@@ -853,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
- /* CP hangs in IGT reloading test on RN, reset to WA */
- if (adev->asic_type == CHIP_RENOIR)
- return true;
-
if (amdgpu_gmc_need_reset_on_init(adev))
return true;
if (amdgpu_psp_tos_reload_needed(adev))
@@ -1217,6 +1213,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
@@ -1363,7 +1361,7 @@ static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
return soc15_common_hw_init(ip_block);
}
-static bool soc15_common_is_idle(void *handle)
+static bool soc15_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1464,9 +1462,9 @@ static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc15_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index ef7c603b50ae..c8ac11a9cdef 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index b9cbeb389edc..cf93fa477674 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -93,11 +93,25 @@
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_ATOMIC_GDS 0x1D
#define PACKET3_ATOMIC_MEM 0x1E
+#define PACKET3_ATOMIC_MEM__ATOMIC(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_ATOMIC_MEM__ADDR_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__ADDR_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__SRC_DATA_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__SRC_DATA_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__CMP_DATA_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__CMP_DATA_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__LOOP_INTERVAL(x) ((((unsigned)(x)) & 0x1FFF) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND__SINGLE_PASS_ATOMIC 0
+#define PACKET3_ATOMIC_MEM__COMMAND__LOOP_UNTIL_COMPARE_SATISFIED 1
#define PACKET3_OCCLUSION_QUERY 0x1F
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_PRED_EXEC__EXEC_COUNT(x) ((((unsigned)(x)) & 0x3FFF) << 0)
+#define PACKET3_PRED_EXEC__VIRTUAL_XCC_ID_SELECT(x) ((((unsigned)(x)) & 0xFF) << 24)
#define PACKET3_DRAW_INDIRECT 0x24
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
#define PACKET3_INDEX_BASE 0x26
@@ -132,6 +146,28 @@
* 1 - pfp
* 2 - ce
*/
+#define PACKET3_WRITE_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_WRITE_DATA__ADDR_INCR(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_WRITE_DATA__RESUME_VF_MI300(x) ((((unsigned)(x)) & 0x1) << 19)
+#define PACKET3_WRITE_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_WRITE_DATA__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_GDS_ADDR(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_WRITE_DATA__DST_SEL__TC_L2 2
+#define PACKET3_WRITE_DATA__DST_SEL__GDS 3
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY 5
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY_MAPPED_ADC_PERSISTENT_STATE 6
+#define PACKET3_WRITE_DATA__ADDR_INCR__INCREMENT_ADDRESS 0
+#define PACKET3_WRITE_DATA__ADDR_INCR__DO_NOT_INCREMENT_ADDRESS 1
+#define PACKET3_WRITE_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_WRITE_CONFIRMATION 0
+#define PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__LRU 0
+#define PACKET3_WRITE_DATA__CACHE_POLICY__STREAM 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__NOA 2
+#define PACKET3_WRITE_DATA__CACHE_POLICY__BYPASS 3
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
@@ -160,6 +196,33 @@
/* 0 - me
* 1 - pfp
*/
+#define PACKET3_WAIT_REG_MEM__FUNCTION(x) ((((unsigned)(x)) & 0x7) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE(x) ((((unsigned)(x)) & 0x3) << 4)
+#define PACKET3_WAIT_REG_MEM__OPERATION(x) ((((unsigned)(x)) & 0x3) << 6)
+#define PACKET3_WAIT_REG_MEM__MES_INTR_PIPE(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WAIT_REG_MEM__MES_ACTION(x) ((((unsigned)(x)) & 0x1) << 24)
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WAIT_REG_MEM__REG_POLL_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR1(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR2(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REFERENCE(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__MASK(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__OPTIMIZE_ACE_OFFLOAD_MODE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_WAIT_REG_MEM__FUNCTION__ALWAYS_PASS 0
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_REF_VALUE 1
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_EQUAL_TO_THE_REF_VALUE 2
+#define PACKET3_WAIT_REG_MEM__FUNCTION__EQUAL_TO_THE_REFERENCE_VALUE 3
+#define PACKET3_WAIT_REG_MEM__FUNCTION__NOT_EQUAL_REFERENCE_VALUE 4
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_OR_EQUAL_REFERENCE_VALUE 5
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_REFERENCE_VALUE 6
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__REGISTER_SPACE 0
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__MEMORY_SPACE 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_REG_MEM 0
+#define PACKET3_WAIT_REG_MEM__OPERATION__WR_WAIT_WR_REG 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_MEM_PREEMPTABLE 3
#define PACKET3_INDIRECT_BUFFER 0x3F
#define INDIRECT_BUFFER_VALID (1 << 23)
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
@@ -169,7 +232,63 @@
*/
#define INDIRECT_BUFFER_PRE_ENB(x) ((x) << 21)
#define INDIRECT_BUFFER_PRE_RESUME(x) ((x) << 30)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_HI(x) ((unsigned)(x))
+#define PACKET3_INDIRECT_BUFFER__IB_SIZE(x) ((((unsigned)(x)) & 0xFFFFF) << 0)
+#define PACKET3_INDIRECT_BUFFER__CHAIN(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_INDIRECT_BUFFER__OFFLOAD_POLLING(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_INDIRECT_BUFFER__VALID(x) ((((unsigned)(x)) & 0x1) << 23)
+#define PACKET3_INDIRECT_BUFFER__VMID(x) ((((unsigned)(x)) & 0xF) << 24)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__PRIV(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__LRU 0
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__STREAM 1
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_COPY_DATA__SRC_SEL(x) ((((unsigned)(x)) & 0xF) << 0)
+#define PACKET3_COPY_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__COUNT_SEL(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_COPY_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS(x) ((((unsigned)(x)) & 0x1) << 29)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__SRC_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__SRC_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__SRC_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_MEMTC_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__DST_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__DST_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__DST_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__DST_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__SRC_SEL__MEMORY 1
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__SRC_SEL__GDS 3
+#define PACKET3_COPY_DATA__SRC_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__SRC_SEL__IMMEDIATE_DATA 5
+#define PACKET3_COPY_DATA__SRC_SEL__ATOMIC_RETURN_DATA 6
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA0 7
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA1 8
+#define PACKET3_COPY_DATA__SRC_SEL__GPU_CLOCK_COUNT 9
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__DST_SEL__GDS 3
+#define PACKET3_COPY_DATA__DST_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__DST_SEL__MEMORY 5
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REG_DC 6
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__COUNT_SEL__32_BITS_OF_DATA 0
+#define PACKET3_COPY_DATA__COUNT_SEL__64_BITS_OF_DATA 1
+#define PACKET3_COPY_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_CONFIRMATION 0
+#define PACKET3_COPY_DATA__WR_CONFIRM__WAIT_FOR_CONFIRMATION 1
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__DEFAULT 0
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__PHASE_UPDATE 1
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
@@ -181,6 +300,15 @@
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
*/
+#define PACKET3_EVENT_WRITE__EVENT_TYPE(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_EVENT_WRITE__OFFLOAD_ENABLE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE(x) ((((unsigned)(x)) & 0x3) << 29)
+#define PACKET3_EVENT_WRITE__ADDRESS_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_EVENT_WRITE__ADDRESS_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__OTHER 0
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_PIPELINESTATS 2
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__CS_PARTIAL_FLUSH 4
#define PACKET3_RELEASE_MEM 0x49
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
@@ -286,6 +414,13 @@
#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
#define PACKET3_REWIND 0x59
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI_VG10(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_LO(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_HI(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__GCR_CNTL(x) ((((unsigned)(x)) & 0x7FF) << 0)
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
#define PACKET3_LOAD_CONFIG_REG 0x60
@@ -300,12 +435,16 @@
#define PACKET3_SET_SH_REG 0x76
#define PACKET3_SET_SH_REG_START 0x00002c00
#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_SET_SH_REG__VMID_SHIFT(x) ((((unsigned)(x)) & 0x1F) << 23)
+#define PACKET3_SET_SH_REG__INDEX(x) ((((unsigned)(x)) & 0xF) << 28)
#define PACKET3_SET_SH_REG_OFFSET 0x77
#define PACKET3_SET_QUEUE_REG 0x78
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
+#define PACKET3_SET_UCONFIG_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
@@ -413,6 +552,11 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
#define PACKET3_RUN_CLEANER_SHADER 0xD2
/* 1. header
* 2. RESERVED [31:0]
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 62ad67d0b598..ad36c96478a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -390,6 +384,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
return AMD_RESET_METHOD_MODE2;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@@ -781,6 +776,34 @@ static int soc21_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x40;
break;
+ case IP_VERSION(11, 5, 3):
+ adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG_DPG |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_GFX_PG;
+ adev->external_rev_id = adev->rev_id + 0x50;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -923,7 +946,7 @@ static int soc21_common_resume(struct amdgpu_ip_block *ip_block)
return soc21_common_hw_init(ip_block);
}
-static bool soc21_common_is_idle(void *handle)
+static bool soc21_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -940,6 +963,7 @@ static int soc21_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(7, 7, 1):
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
case IP_VERSION(7, 11, 3):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -972,9 +996,9 @@ static int soc21_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc21_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->get_clockgating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 6b8e078ee7c7..972b449ab89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -26,7 +26,6 @@
#include <linux/pci.h>
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -532,7 +531,7 @@ static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
return soc24_common_hw_init(ip_block);
}
-static bool soc24_common_is_idle(void *handle)
+static bool soc24_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -575,9 +574,9 @@ static int soc24_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc24_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc24_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->get_clockgating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 64891f099366..8a3f326474e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -92,6 +92,9 @@ enum ta_ras_block {
TA_RAS_BLOCK__MCA,
TA_RAS_BLOCK__VCN,
TA_RAS_BLOCK__JPEG,
+ TA_RAS_BLOCK__IH,
+ TA_RAS_BLOCK__MPIO,
+ TA_RAS_BLOCK__MMSCH,
TA_NUM_BLOCK_MAX
};
@@ -151,6 +154,7 @@ struct ta_ras_init_flags {
uint16_t xcc_mask;
uint8_t channel_dis_num;
uint8_t nps_mode;
+ uint32_t active_umc_mask;
};
struct ta_ras_mca_addr {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 0968e551f7b5..ee8038df17e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
@@ -353,9 +363,9 @@ static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
return tonga_ih_hw_init(ip_block);
}
-static bool tonga_ih_is_idle(void *handle)
+static bool tonga_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index a7b9c358a2d4..0f5b1719fda5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -85,7 +85,8 @@ bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_sta
return (amdgpu_ras_is_poison_mode_supported(adev) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
+ ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) ||
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1)));
}
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
@@ -173,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t vram_type = adev->gmc.vram_type;
+ struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
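+ /* the flipped row bit also depends on the HBM generation */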
+ switch (vram_type) {
+ case AMDGPU_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case AMDGPU_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ dev_warn(adev->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+
+ adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
+
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
- uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
- uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -210,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
- loop_bits[0] = UMC_V12_0_PA_C2_BIT;
- loop_bits[1] = UMC_V12_0_PA_C3_BIT;
- loop_bits[2] = UMC_V12_0_PA_C4_BIT;
- loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
- /* other nps modes are taken as nps1 */
- if (nps == AMDGPU_NPS4_PARTITION_MODE) {
- loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
- loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
- loop_bits[2] = UMC_V12_0_PA_B0_BIT;
- loop_bits[3] = UMC_V12_0_PA_R11_BIT;
- }
+ flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+ bit_num = adev->umc.flip_bits.bit_num;
+ retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa &= ~BIT_ULL(loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
- /* MA_R13_BIT will be handled later */
+ /* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+ row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+ /* Each channel holds 2.25GB; [R14 R13] are changed when converting
+ * from the MCA address to the PA if the two bits are 0x3,
+ * so take them from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
- for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
- /* add row bit 13 */
- row = ((column >> 3) << 13) | row_lower;
+ /* handle extra row bit */
+ if (bit_num == RETIRE_FLIP_BITS_NUM)
+ row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+ row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -415,6 +473,7 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
err_type = ACA_ERROR_TYPE_CE;
else
return 0;
+ bank->aca_err_type = err_type;
ret = aca_bank_info_decode(bank, &info);
if (ret)
@@ -426,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
- count = ext_error_code == 0 ?
- ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+ if (umc_v12_0_is_deferred_error(adev, status))
+ count = ext_error_code == 0 ?
+ adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+ else
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
@@ -467,15 +530,17 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
- uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+ uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block decode of consumption is SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
@@ -513,11 +578,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- if (nps == AMDGPU_NPS4_PARTITION_MODE)
- shift_bit = UMC_V12_0_PA_B0_BIT;
-
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -651,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
return die;
}
+static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid)
+{
+ if (did)
+ *did = MCA_IPID_2_DIE_ID(ipid);
+ if (ch)
+ *ch = MCA_IPID_2_UMC_CH(ipid);
+ if (umc_inst)
+ *umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ if (sid)
+ *sid = MCA_IPID_2_SOCKET_ID(ipid);
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -663,5 +736,7 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
+ .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
+ .mca_ipid_parse = umc_v12_0_mca_ipid_parse,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35
/* channel bit in SOC physical address */
#define UMC_V12_0_PA_CH4_BIT 12
#define UMC_V12_0_PA_CH5_BIT 13
/* bank bit in SOC physical address */
#define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
/* row bits in MCA address */
#define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5830e799c0a3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -98,7 +98,7 @@ static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v3_1_ring_emit_fence - emit an fence & trap command
+ * uvd_v3_1_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
@@ -242,7 +242,7 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
@@ -416,7 +416,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* Program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
@@ -758,9 +779,9 @@ static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block)
return uvd_v3_1_hw_init(ip_block);
}
-static bool uvd_v3_1_is_idle(void *handle)
+static bool uvd_v3_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f93079e09215..4b96fd583772 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -302,7 +302,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
- /* disable interupt */
+ /* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -312,6 +312,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x203108);
@@ -658,9 +659,9 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static bool uvd_v4_2_is_idle(void *handle)
+static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 050a0f309390..71409ad8b7ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,9 +580,9 @@ static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool uvd_v5_0_is_idle(void *handle)
+static bool uvd_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
@@ -837,9 +837,9 @@ out:
return ret;
}
-static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
+static void uvd_v5_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9d036ee51fb..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -1143,9 +1145,9 @@ static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, vmid);
}
-static bool uvd_v6_0_is_idle(void *handle)
+static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
@@ -1156,7 +1158,7 @@ static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(adev))
+ if (uvd_v6_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
@@ -1498,9 +1500,9 @@ out:
return ret;
}
-static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void uvd_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9d237b5937fb..1f8866f3f63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
new file mode 100644
index 000000000000..9ae424618556
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ * Timur Kristóf <timur.kristof@gmail.com>
+ * Alexandre Demers <alexandre.f.demers@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "amdgpu_gart.h"
+#include "sid.h"
+#include "vce_v1_0.h"
+#include "vce/vce_1_0_d.h"
+#include "vce/vce_1_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+
+#define VCE_V1_0_FW_SIZE (256 * 1024)
+#define VCE_V1_0_STACK_SIZE (64 * 1024)
+#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1))
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
+
+#define VCE_V1_0_GART_PAGE_START \
+ (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS)
+#define VCE_V1_0_GART_ADDR_START \
+ (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE)
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+struct vce_v1_0_fw_signature {
+ int32_t offset;
+ uint32_t length;
+ int32_t number;
+ struct {
+ uint32_t chip_id;
+ uint32_t keyselect;
+ uint32_t nonce[4];
+ uint32_t sigval[4];
+ } val[8];
+};
+
+/**
+ * vce_v1_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+}
+
+static int vce_v1_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_LMI_STATUS) & 0x337f)
+ return 0;
+
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n");
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void vce_v1_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x1e;
+ tmp &= ~0xe100e1;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xff9ff000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+}
+
+/**
+ * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The VCE1 firmware validation mechanism needs a firmware signature.
+ * This function finds the signature appropriate for the current
+ * ASIC and writes that into the VCPU BO.
+ */
+static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ struct vce_v1_0_fw_signature *sign;
+ unsigned int ucode_offset;
+ uint32_t chip_id;
+ u32 *cpu_addr;
+ int i;
+
+ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+ ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ cpu_addr = adev->vce.cpu_addr;
+
+ sign = (void *)adev->vce.fw->data + ucode_offset;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ default:
+ dev_err(adev->dev, "asic_type %#010x was not found!", adev->asic_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < le32_to_cpu(sign->number); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ break;
+ }
+
+ if (i == le32_to_cpu(sign->number)) {
+ dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware",
+ chip_id, amdgpu_asic_name[adev->asic_type]);
+ return -EINVAL;
+ }
+
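+ /* Lay out the signed image in the VCPU BO, 192 bytes in:
+ * the nonce, the total length word, 44 zeroed bytes, the firmware
+ * body, and finally the signature value for the selected key.
+ */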
+ cpu_addr += (256 - 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16);
+ cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64);
+
+ memset_io(&cpu_addr[5], 0, 44);
+ memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign));
+
+ cpu_addr += (le32_to_cpu(sign->length) + 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16);
+
+ adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+ return 0;
+}
+
+static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev)
+{
+ int i;
+
+ dev_dbg(adev->dev, "VCE keyselect: %d", adev->vce.keyselect);
+ WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect);
+
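+ /* wait for the firmware validation to finish */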
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)
+ break;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) {
+ dev_err(adev->dev, "VCE FW validation timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) {
+ dev_err(adev->dev, "VCE FW validation failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK))
+ break;
+ }
+
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) {
+ dev_err(adev->dev, "VCE FW busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint32_t offset;
+ uint32_t size;
+
+ /*
+ * When the keyselect is already set, don't perturb VCE FW.
+ * Validation seems to always fail the second time.
+ */
+ if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) {
+ dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n",
+ RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+ return 0;
+ }
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0);
+
+ WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
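+ /* scratch7 appears to tell the firmware the maximum number of VCE handles */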
+ WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES);
+
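+ /* program the three VCPU cache regions: firmware, stack and data */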
+ offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V1_0_FW_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ offset += size;
+ size = VCE_V1_0_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ offset += size;
+ size = VCE_V1_0_DATA_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+ return vce_v1_0_wait_for_fw_validation(adev);
+}
+
+/**
+ * vce_v1_0_is_idle() - Check idle status of VCE1 IP block
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ *
+ * Check whether VCE is busy according to VCE_STATUS.
+ * Also check whether the SRBM thinks VCE is busy, although
+ * SRBM_STATUS2.VCE_BUSY seems to be bogus because it
+ * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit.
+ */
+static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool busy =
+ (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) ||
+ (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+
+ return !busy;
+}
+
+static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ unsigned int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ udelay(1);
+ if (vce_v1_0_is_idle(ip_block))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v1_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
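+ /* set BUSY flag */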
+ WREG32_P(mmVCE_STATUS, 1, ~1);
+
+ r = vce_v1_0_mc_resume(adev);
+ if (r)
+ return r;
+
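+ /* program the location and size of both ring buffers */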
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ mdelay(100);
+
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
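+ /* wait for the ECPU to report that the firmware is loaded */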
+ r = vce_v1_0_firmware_loaded(adev);
+
+ /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. */
+ WREG32(mmVCE_STATUS, 0);
+
+ if (r) {
+ dev_err(adev->dev, "VCE not responding, giving up\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *ip_block;
+ int status;
+ int i;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v1_0_lmi_clean(adev))
+ dev_warn(adev->dev, "VCE not idle\n");
+
+ if (vce_v1_0_wait_for_idle(ip_block))
+ dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n",
+ RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+ for (i = 0; i < 100; ++i) {
+ status = RREG32(mmVCE_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ WREG32(mmVCE_STATUS, 0);
+
+ return 0;
+}
+
+static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1ff000;
+ tmp |= 0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1ff000;
+ tmp &= ~0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
+ adev->vce.num_rings = 2;
+
+ vce_v1_0_set_ring_funcs(adev);
+ vce_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Due to various hardware limitations, VCE1 requires
+ * the VCPU BO to be in the low 32-bit address range.
+ * Ensure that the VCPU BO has a 32-bit GPU address,
+ * or return an error code when that isn't possible.
+ *
+ * To accommodate that, we place the GART in the low address range
+ * and reserve some GART pages where we map the VCPU BO,
+ * so that it gets a 32-bit address.
+ */
+static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev)
+{
+ u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo);
+ u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ u64 max_vcpu_bo_addr = 0xffffffff - bo_size;
+ u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE;
+ u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
+ u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
+
+ /*
+ * Check if the VCPU BO already has a 32-bit address.
+ * E.g. if the MC is configured to put VRAM in the low address range.
+ */
+ if (gpu_addr <= max_vcpu_bo_addr)
+ return 0;
+
+ /* Check if we can map the VCPU BO in GART to a 32-bit address. */
+ if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr)
+ return -EINVAL;
+
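+ /* map the VCPU BO's VRAM pages into the reserved GART window */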
+ amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
+ num_pages, flags, adev->gart.ptr);
+ adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;
+ if (adev->vce.gpu_addr > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int r, i;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
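+ /* reserve enough BO space for the firmware, stack and per-handle data */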
+ r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE +
+ VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ hw_prio, NULL);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ return amdgpu_vce_sw_fini(adev);
+}
+
+/**
+ * vce_v1_0_hw_init - start and test VCE block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vce(adev, true);
+ else
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ }
+
+ dev_info(adev->dev, "VCE initialized successfully.\n");
+
+ return 0;
+}
+
+static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ r = vce_v1_0_stop(ip_block->adev);
+ if (r)
+ return r;
+
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
+ return 0;
+}
+
+static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
+ r = vce_v1_0_hw_fini(ip_block);
+ if (r) {
+ dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i", r);
+ return r;
+ }
+
+ return amdgpu_vce_suspend(adev);
+}
+
+static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ return vce_v1_0_hw_init(ip_block);
+}
+
+static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+ WREG32_P(mmVCE_SYS_INT_EN, val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ return 0;
+}
+
+static int vce_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg(adev->dev, "IH: VCE\n");
+ switch (entry->src_data[0]) {
+ case 0:
+ case 1:
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
+ break;
+ default:
+ dev_err(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ vce_v1_0_init_cg(adev);
+ vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * This doesn't actually powergate the VCE block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+ * the SMC and the hw blocks.
+ */
+ if (state == AMD_PG_STATE_GATE)
+ return vce_v1_0_stop(adev);
+ else
+ return vce_v1_0_start(adev);
+}
+
+static const struct amd_ip_funcs vce_v1_0_ip_funcs = {
+ .name = "vce_v1_0",
+ .early_init = vce_v1_0_early_init,
+ .sw_init = vce_v1_0_sw_init,
+ .sw_fini = vce_v1_0_sw_fini,
+ .hw_init = vce_v1_0_hw_init,
+ .hw_fini = vce_v1_0_hw_fini,
+ .suspend = vce_v1_0_suspend,
+ .resume = vce_v1_0_resume,
+ .is_idle = vce_v1_0_is_idle,
+ .wait_for_idle = vce_v1_0_wait_for_idle,
+ .set_clockgating_state = vce_v1_0_set_clockgating_state,
+ .set_powergating_state = vce_v1_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .get_rptr = vce_v1_0_ring_get_rptr,
+ .get_wptr = vce_v1_0_ring_get_wptr,
+ .set_wptr = vce_v1_0_ring_set_wptr,
+ .parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+};
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = {
+ .set = vce_v1_0_set_interrupt_state,
+ .process = vce_v1_0_process_interrupt,
+};
+
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.irq.num_types = 1;
+ adev->vce.irq.funcs = &vce_v1_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version vce_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v1_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
index 0d878ca3acba..206e7bec897f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
@@ -1,5 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,12 +24,9 @@
*
*/
-#ifndef __DCE_V11_0_H__
-#define __DCE_V11_0_H__
+#ifndef __VCE_V1_0_H__
+#define __VCE_V1_0_H__
-extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+extern const struct amdgpu_ip_block_version vce_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c633b7ff2943..8ea8a6193492 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -201,9 +201,9 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v2_0_is_idle(void *handle)
+static bool vce_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
@@ -214,7 +214,7 @@ static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(adev))
+ if (vce_v2_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
@@ -280,11 +280,11 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
if (vce_v2_0_lmi_clean(adev)) {
- DRM_INFO("vce is not idle \n");
+ DRM_INFO("VCE is not idle \n");
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
if (!ip_block)
return -EINVAL;
@@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
adev->vce.num_rings = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index f8bddcd19b68..719e9643c43d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
adev->vce.num_rings = 3;
vce_v3_0_set_ring_funcs(adev);
@@ -597,9 +602,9 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v3_0_is_idle(void *handle)
+static bool vce_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
@@ -614,7 +619,7 @@ static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v3_0_is_idle(adev))
+ if (vce_v3_0_is_idle(ip_block))
return 0;
return -ETIMEDOUT;
@@ -828,9 +833,9 @@ out:
return ret;
}
-static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
+static void vce_v3_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 335bda64ff5b..2d64002bed61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 5ea96c983517..a316797875a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -81,14 +81,14 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
-static int vcn_v1_0_stop(struct amdgpu_device *adev);
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
@@ -105,7 +105,8 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst[0].set_pg_state = vcn_v1_0_set_pg_state;
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
@@ -113,7 +114,7 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_early_init(ip_block);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -138,23 +139,23 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
if (r)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
/* Override the work func */
- adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+ adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -166,18 +167,18 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
+ adev->vcn.inst[0].internal.scratch9 = adev->vcn.inst->external.scratch9 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
+ adev->vcn.inst[0].internal.data0 = adev->vcn.inst->external.data0 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
+ adev->vcn.inst[0].internal.data1 = adev->vcn.inst->external.data1 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
+ adev->vcn.inst[0].internal.cmd = adev->vcn.inst->external.cmd =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = adev->vcn.inst->external.nop =
+ adev->vcn.inst[0].internal.nop = adev->vcn.inst->external.nop =
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -189,10 +190,10 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@@ -223,17 +224,17 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
int r;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev);
+ amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -253,7 +254,7 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -276,13 +277,14 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
return 0;
@@ -301,7 +303,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
bool idle_work_unexecuted;
- idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);
@@ -311,7 +313,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
return r;
}
@@ -327,7 +329,7 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -339,12 +341,13 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v1_0_mc_resume_spg_mode - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -410,8 +413,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config);
}
-static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -485,12 +489,13 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
/**
* vcn_v1_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
/* JPEG disable CGC */
@@ -611,12 +616,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: Pointer to the VCN instance structure
*
* Enable clock gating for VCN block
*/
-static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
/* enable JPEG CGC */
@@ -680,8 +686,10 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
+static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* disable JPEG CGC */
@@ -734,8 +742,9 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
-static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -779,8 +788,9 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -823,12 +833,13 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_start_spg_mode - start VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Set up and start the VCN block
*/
-static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -837,13 +848,13 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_disable_static_power_gating(adev);
+ vcn_1_0_disable_static_power_gating(vinst);
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/* disable clock gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -885,7 +896,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v1_0_mc_resume_spg_mode(adev);
+ vcn_v1_0_mc_resume_spg_mode(vinst);
WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
@@ -998,11 +1009,17 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
jpeg_v1_0_start(adev, 0);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1010,7 +1027,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_1_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -1019,7 +1036,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
/* enable clock gating */
- vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 0);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1068,7 +1085,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
- vcn_v1_0_mc_resume_dpg_mode(adev);
+ vcn_v1_0_mc_resume_dpg_mode(vinst);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
@@ -1085,7 +1102,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
- vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 1);
/* setup mmUVD_LMI_CTRL */
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
@@ -1142,24 +1159,32 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
jpeg_v1_0_start(adev, 1);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v1_0_start(struct amdgpu_device *adev)
+static int vcn_v1_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+
return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
- vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
+ vcn_v1_0_start_dpg_mode(vinst) : vcn_v1_0_start_spg_mode(vinst);
}
/**
* vcn_v1_0_stop_spg_mode - stop VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop the VCN block
*/
-static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int tmp;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
@@ -1199,13 +1224,20 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
- vcn_v1_0_enable_clock_gating(adev);
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
+ vcn_1_0_enable_static_power_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
@@ -1234,24 +1266,32 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v1_0_stop(struct amdgpu_device *adev)
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- r = vcn_v1_0_stop_dpg_mode(adev);
+ r = vcn_v1_0_stop_dpg_mode(vinst);
else
- r = vcn_v1_0_stop_spg_mode(adev);
+ r = vcn_v1_0_stop_spg_mode(vinst);
return r;
}
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
int ret_code;
uint32_t reg_data = 0;
uint32_t reg_data2 = 0;
@@ -1298,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1359,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1377,9 +1415,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v1_0_is_idle(void *handle)
+static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1399,16 +1437,17 @@ static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(adev))
+ if (!vcn_v1_0_is_idle(ip_block))
return -EBUSY;
- vcn_v1_0_enable_clock_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
}
return 0;
}
@@ -1800,8 +1839,8 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1811,28 +1850,29 @@ static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v1_0_stop(adev);
+ ret = vcn_v1_0_stop(vinst);
else
- ret = vcn_v1_0_start(adev);
+ ret = vcn_v1_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1848,7 +1888,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(vcn_inst, &new_state);
}
fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
@@ -1862,16 +1902,16 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
}
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
@@ -1897,7 +1937,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
struct dpg_pause_state new_state;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (fences)
@@ -1915,14 +1955,14 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(adev->vcn.inst, &new_state);
}
}
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
- mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+ schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+ mutex_unlock(&ring->adev->vcn.inst[0].vcn1_jpeg1_workaround);
}
static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1997,7 +2037,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
- .set_powergating_state = vcn_v1_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v1_0_dump_ip_state,
.print_ip_state = vcn_v1_0_print_ip_state,
};
@@ -2056,11 +2096,11 @@ static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[0].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.cmd, 0)) {
r = vcn_v1_0_validate_bo(p, job,
((u64)msg_hi) << 32 | msg_lo);
if (r)
@@ -2145,7 +2185,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
}
@@ -2156,7 +2196,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 2;
adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}
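
The vcn_v1_0.c changes above move per-instance state (num_enc_rings, idle_work, cur_state, pause_dpg_mode, set_pg_state) under adev->vcn.inst[] and thread a struct amdgpu_vcn_inst * through the start/stop and gating helpers, which derive the device from vinst->adev. A hedged sketch of that shape, with foo_vcn_* as placeholder names and stub bodies rather than the real SPG/DPG sequences:

	/* Sketch only: per-instance handlers instead of device-wide ones.
	 * foo_vcn_* are placeholders; the stubs stand in for the real
	 * start/stop programming.
	 */
	static int foo_vcn_start(struct amdgpu_vcn_inst *vinst)
	{
		struct amdgpu_device *adev = vinst->adev;	/* device comes from the instance */

		dev_dbg(adev->dev, "starting VCN instance %d\n", vinst->inst);
		return 0;
	}

	static int foo_vcn_stop(struct amdgpu_vcn_inst *vinst)
	{
		dev_dbg(vinst->adev->dev, "stopping VCN instance %d\n", vinst->inst);
		return 0;
	}

	static int foo_vcn_set_pg_state(struct amdgpu_vcn_inst *vinst,
					enum amd_powergating_state state)
	{
		int ret;

		if (state == vinst->cur_state)	/* gating state is tracked per instance */
			return 0;

		ret = (state == AMD_PG_STATE_GATE) ?
			foo_vcn_stop(vinst) : foo_vcn_start(vinst);
		if (!ret)
			vinst->cur_state = state;

		return ret;
	}

Installing such a hook per instance in early_init is what allows the shared vcn_set_powergating_state entry used by the vcn_v1_0 and vcn_v2_0 ip_funcs tables in this patch to dispatch to the right generation without a per-version set_powergating_state.
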
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e42cfc731ad8..8897dcc9c1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
@@ -92,11 +93,13 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
@@ -110,15 +113,16 @@ static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
- adev->vcn.num_enc_rings = 1;
+ adev->vcn.inst[0].num_enc_rings = 1;
else
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst->set_pg_state = vcn_v2_0_set_pg_state;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -132,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -145,7 +147,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
@@ -153,13 +155,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -175,25 +177,25 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -210,7 +212,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -222,14 +230,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -245,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -254,15 +261,15 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, 0);
- return r;
+ return 0;
}
/**
@@ -292,7 +299,7 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -312,13 +319,14 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
return 0;
}
@@ -338,7 +346,7 @@ static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ r = amdgpu_vcn_suspend(ip_block->adev, 0);
return r;
}
@@ -354,7 +362,7 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -366,12 +374,13 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: Pointer to the VCN instance structure
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
+static void vcn_v2_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -426,8 +435,10 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
-static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -525,12 +536,13 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/**
* vcn_v2_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
if (amdgpu_sriov_vf(adev))
@@ -634,9 +646,10 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
+static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -685,12 +698,13 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -743,8 +757,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -792,8 +807,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -834,13 +850,15 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
}
}
-static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -852,7 +870,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
+ vcn_v2_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -901,7 +919,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
+ vcn_v2_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -922,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -966,12 +989,19 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v2_0_start(struct amdgpu_device *adev)
+static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -981,16 +1011,16 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
+ return vcn_v2_0_start_dpg_mode(vinst, adev->vcn.inst->indirect_sram);
- vcn_v2_0_disable_static_power_gating(adev);
+ vcn_v2_0_disable_static_power_gating(vinst);
/* set uvd status busy */
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/*SW clock gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(vinst);
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
@@ -1034,7 +1064,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v2_0_mc_resume(adev);
+ vcn_v2_0_mc_resume(vinst);
/* release VCPU reset to boot */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
@@ -1139,15 +1169,21 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v2_0_pause_dpg_mode(adev, 0, &state);
+ vcn_v2_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1169,16 +1205,22 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v2_0_stop(struct amdgpu_device *adev)
+static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_0_stop_dpg_mode(adev);
+ r = vcn_v2_0_stop_dpg_mode(vinst);
if (r)
return r;
goto power_off;
@@ -1230,8 +1272,13 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
/* clear status */
WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
- vcn_v2_0_enable_clock_gating(adev);
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_clock_gating(vinst);
+ vcn_v2_0_enable_static_power_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS);
power_off:
if (adev->pm.dpm_enabled)
@@ -1240,9 +1287,11 @@ power_off:
return 0;
}
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1259,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1317,9 +1366,19 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v2_0_is_idle(void *handle)
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = vcn_v2_0_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_0_start(vinst);
+}
+
+static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1346,12 +1405,12 @@ static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(adev))
+ if (!vcn_v2_0_is_idle(ip_block))
return -EBUSY;
- vcn_v2_0_enable_clock_gating(adev);
+ vcn_v2_0_enable_clock_gating(&adev->vcn.inst[0]);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(&adev->vcn.inst[0]);
}
return 0;
}
@@ -1421,9 +1480,9 @@ void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
@@ -1438,7 +1497,7 @@ void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[0].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
@@ -1458,7 +1517,7 @@ void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.nop, 0));
amdgpu_ring_write(ring, 0);
}
}
@@ -1479,25 +1538,25 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
struct amdgpu_device *adev = ring->adev;
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.context_id, 0));
amdgpu_ring_write(ring, seq);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
@@ -1520,14 +1579,14 @@ void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_vmid, 0));
amdgpu_ring_write(ring, vmid);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_low, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_high, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_size, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
@@ -1536,16 +1595,16 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.gp_scratch8, 0));
amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
@@ -1570,13 +1629,13 @@ void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
@@ -1777,9 +1836,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1796,8 +1855,8 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1807,23 +1866,24 @@ static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_device *adev = vinst->adev;
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v2_0_stop(adev);
+ ret = vcn_v2_0_stop(vinst);
else
- ret = vcn_v2_0_start(adev);
+ ret = vcn_v2_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
@@ -1862,7 +1922,7 @@ static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
adev->vcn.inst->ring_dec.wptr_old = 0;
vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
adev->vcn.inst->ring_enc[i].wptr = 0;
adev->vcn.inst->ring_enc[i].wptr_old = 0;
vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
@@ -1988,7 +2048,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ for (r = 0; r < adev->vcn.inst[0].num_enc_rings; ++r) {
ring = &adev->vcn.inst->ring_enc[r];
ring->wptr = 0;
MMSCH_V2_0_INSERT_DIRECT_WT(
@@ -2032,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2104,9 +2104,9 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.is_idle = vcn_v2_0_is_idle,
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
- .set_powergating_state = vcn_v2_0_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
@@ -2137,6 +2137,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2166,6 +2167,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2177,7 +2179,7 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
}
@@ -2188,7 +2190,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 1;
adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
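
Several vcn_v2_0.c hunks above append a read of mmUVD_STATUS after the last register writes in the start/stop paths, with the comment that one read-back ensures the writes are done; this is the usual flush for posted MMIO writes. A minimal illustration, assuming placeholder DEMO_CTRL/DEMO_STATUS offsets rather than real amdgpu registers:

	/* Illustration of the read-back flush; DEMO_CTRL and DEMO_STATUS are
	 * placeholder offsets, not amdgpu registers.
	 */
	static void demo_engine_kick(struct amdgpu_device *adev)
	{
		WREG32(DEMO_CTRL, 0x1);		/* MMIO writes can be posted */

		/* The read completes only after the earlier writes have reached
		 * the device, so nothing races with a half-programmed engine.
		 */
		(void)RREG32(DEMO_STATUS);
	}
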
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b518202955ca..cebee453871c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -95,18 +96,123 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
};
+static void vcn_v2_5_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
+ unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
+ unsigned int i, j;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < v->num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&v->ring_enc[j]);
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !v->using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (fence[i] ||
+ unlikely(atomic_read(&v->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ v->pause_dpg_mode(v, &new_state);
+ }
+
+ fence[i] += amdgpu_fence_count_emitted(&v->ring_dec);
+ fences += fence[i];
+
+ }
+
+ if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
+ amdgpu_vcn_put_profile(adev);
+ } else {
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
+
+ atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
+
+ /* We can safely return early here because we've cancelled
+ * the delayed work so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !v->using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&v->dpg_enc_submission_cnt);
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
+
+ for (i = 0; i < v->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&v->ring_enc[i]);
+
+ if (fences || atomic_read(&v->dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
+ v->pause_dpg_mode(v, &new_state);
+ }
+ mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
+}
+
+static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+ !adev->vcn.inst[ring->me].using_unified_queue)
+ atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&adev->vcn.inst[0].total_submission_cnt);
+
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work,
+ VCN_IDLE_TIMEOUT);
+}
+
/**
* vcn_v2_5_early_init - set function pointers and load microcode
*
@@ -118,11 +224,13 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
u32 harvest;
int i;
@@ -131,13 +239,12 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
+ adev->vcn.inst[i].num_enc_rings = 2;
}
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
+ AMDGPU_VCN_HARVEST_VCN1))
/* both instances are harvested, disable the block */
return -ENOENT;
-
- adev->vcn.num_enc_rings = 2;
}
vcn_v2_5_set_dec_ring_funcs(adev);
@@ -145,7 +252,15 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -159,11 +274,11 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
/* VCN DEC TRAP */
@@ -173,7 +288,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
if (r)
@@ -185,39 +300,36 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
- }
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
+ r = amdgpu_vcn_sw_init(adev, j);
+ if (r)
+ return r;
- amdgpu_vcn_setup_ucode(adev);
+ /* Override the work func */
+ adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ amdgpu_vcn_setup_ucode(adev, j);
- for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ r = amdgpu_vcn_resume(adev, j);
+ if (r)
+ return r;
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
-
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+
+ adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
@@ -237,7 +349,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst[j].ring_enc[i];
@@ -265,29 +377,34 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ adev->vcn.inst[j].reset = vcn_v2_5_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
-
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -303,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -319,15 +436,16 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
-
- r = amdgpu_vcn_sw_fini(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ amdgpu_vcn_sw_fini(adev, i);
+ }
- return r;
+ return 0;
}
/**
@@ -366,7 +484,7 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -390,19 +508,21 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
@@ -417,15 +537,20 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -437,11 +562,14 @@ static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v2_5_hw_init(ip_block);
@@ -451,69 +579,71 @@ static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
+static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t size;
uint32_t offset;
- int i;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
- /* cache window 0: fw */
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
- offset = 0;
- } else {
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr));
- offset = size;
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
- }
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* cache window 1: stack */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
-
- /* cache window 2: context */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
-
- /* non-cache window */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -611,123 +741,124 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data;
- int i;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* UVD disable CGC */
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
- data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- else
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
- data &= ~(UVD_CGC_GATE__SYS_MASK
- | UVD_CGC_GATE__UDEC_MASK
- | UVD_CGC_GATE__MPEG2_MASK
- | UVD_CGC_GATE__REGS_MASK
- | UVD_CGC_GATE__RBC_MASK
- | UVD_CGC_GATE__LMI_MC_MASK
- | UVD_CGC_GATE__LMI_UMC_MASK
- | UVD_CGC_GATE__IDCT_MASK
- | UVD_CGC_GATE__MPRD_MASK
- | UVD_CGC_GATE__MPC_MASK
- | UVD_CGC_GATE__LBSI_MASK
- | UVD_CGC_GATE__LRBBM_MASK
- | UVD_CGC_GATE__UDEC_RE_MASK
- | UVD_CGC_GATE__UDEC_CM_MASK
- | UVD_CGC_GATE__UDEC_IT_MASK
- | UVD_CGC_GATE__UDEC_DB_MASK
- | UVD_CGC_GATE__UDEC_MP_MASK
- | UVD_CGC_GATE__WCB_MASK
- | UVD_CGC_GATE__VCPU_MASK
- | UVD_CGC_GATE__MMSCH_MASK);
-
- WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
-
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
- | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
- | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
- | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
- | UVD_CGC_CTRL__SYS_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MODE_MASK
- | UVD_CGC_CTRL__MPEG2_MODE_MASK
- | UVD_CGC_CTRL__REGS_MODE_MASK
- | UVD_CGC_CTRL__RBC_MODE_MASK
- | UVD_CGC_CTRL__LMI_MC_MODE_MASK
- | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
- | UVD_CGC_CTRL__IDCT_MODE_MASK
- | UVD_CGC_CTRL__MPRD_MODE_MASK
- | UVD_CGC_CTRL__MPC_MODE_MASK
- | UVD_CGC_CTRL__LBSI_MODE_MASK
- | UVD_CGC_CTRL__LRBBM_MODE_MASK
- | UVD_CGC_CTRL__WCB_MODE_MASK
- | UVD_CGC_CTRL__VCPU_MODE_MASK
- | UVD_CGC_CTRL__MMSCH_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- /* turn on */
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
- data |= (UVD_SUVD_CGC_GATE__SRE_MASK
- | UVD_SUVD_CGC_GATE__SIT_MASK
- | UVD_SUVD_CGC_GATE__SMP_MASK
- | UVD_SUVD_CGC_GATE__SCM_MASK
- | UVD_SUVD_CGC_GATE__SDB_MASK
- | UVD_SUVD_CGC_GATE__SRE_H264_MASK
- | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SIT_H264_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SCM_H264_MASK
- | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SDB_H264_MASK
- | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SCLR_MASK
- | UVD_SUVD_CGC_GATE__UVD_SC_MASK
- | UVD_SUVD_CGC_GATE__ENT_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
- | UVD_SUVD_CGC_GATE__SITE_MASK
- | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
- | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
- | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
- | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
- | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
- data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
- | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
- | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+ /* UVD disable CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
+ data &= ~(UVD_CGC_GATE__SYS_MASK
+ | UVD_CGC_GATE__UDEC_MASK
+ | UVD_CGC_GATE__MPEG2_MASK
+ | UVD_CGC_GATE__REGS_MASK
+ | UVD_CGC_GATE__RBC_MASK
+ | UVD_CGC_GATE__LMI_MC_MASK
+ | UVD_CGC_GATE__LMI_UMC_MASK
+ | UVD_CGC_GATE__IDCT_MASK
+ | UVD_CGC_GATE__MPRD_MASK
+ | UVD_CGC_GATE__MPC_MASK
+ | UVD_CGC_GATE__LBSI_MASK
+ | UVD_CGC_GATE__LRBBM_MASK
+ | UVD_CGC_GATE__UDEC_RE_MASK
+ | UVD_CGC_GATE__UDEC_CM_MASK
+ | UVD_CGC_GATE__UDEC_IT_MASK
+ | UVD_CGC_GATE__UDEC_DB_MASK
+ | UVD_CGC_GATE__UDEC_MP_MASK
+ | UVD_CGC_GATE__WCB_MASK
+ | UVD_CGC_GATE__VCPU_MASK
+ | UVD_CGC_GATE__MMSCH_MASK);
+
+ WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
+
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK
+ | UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ /* turn on */
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
+ data |= (UVD_SUVD_CGC_GATE__SRE_MASK
+ | UVD_SUVD_CGC_GATE__SIT_MASK
+ | UVD_SUVD_CGC_GATE__SMP_MASK
+ | UVD_SUVD_CGC_GATE__SCM_MASK
+ | UVD_SUVD_CGC_GATE__SDB_MASK
+ | UVD_SUVD_CGC_GATE__SRE_H264_MASK
+ | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_H264_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCM_H264_MASK
+ | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_H264_MASK
+ | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCLR_MASK
+ | UVD_SUVD_CGC_GATE__UVD_SC_MASK
+ | UVD_SUVD_CGC_GATE__ENT_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
+ | UVD_SUVD_CGC_GATE__SITE_MASK
+ | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
+ | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
- }
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -776,68 +907,69 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data = 0;
- int i;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* enable UVD CGC */
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
- data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- else
- data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
- | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
- | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
- | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
- | UVD_CGC_CTRL__SYS_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MODE_MASK
- | UVD_CGC_CTRL__MPEG2_MODE_MASK
- | UVD_CGC_CTRL__REGS_MODE_MASK
- | UVD_CGC_CTRL__RBC_MODE_MASK
- | UVD_CGC_CTRL__LMI_MC_MODE_MASK
- | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
- | UVD_CGC_CTRL__IDCT_MODE_MASK
- | UVD_CGC_CTRL__MPRD_MODE_MASK
- | UVD_CGC_CTRL__MPC_MODE_MASK
- | UVD_CGC_CTRL__LBSI_MODE_MASK
- | UVD_CGC_CTRL__LRBBM_MODE_MASK
- | UVD_CGC_CTRL__WCB_MODE_MASK
- | UVD_CGC_CTRL__VCPU_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
- data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
- | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
- | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+ /* enable UVD CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
@@ -862,11 +994,14 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
tmp, 0, indirect);
}
-static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -881,7 +1016,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -930,7 +1065,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -941,7 +1076,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
- vcn_v2_6_enable_ras(adev, inst_idx, indirect);
+ vcn_v2_6_enable_ras(vinst, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -957,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1003,200 +1143,200 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v2_5_start(struct amdgpu_device *adev)
+static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_fw_shared *fw_shared =
+ adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- /* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
- ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* set uvd status busy */
- tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
- }
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* set uvd status busy */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return 0;
- /*SW clock gating */
- vcn_v2_5_disable_clock_gating(adev);
+ /* SW clock gating */
+ vcn_v2_5_disable_clock_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
- tmp &= ~0xff;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- }
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
- vcn_v2_5_mc_resume(adev);
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
+ tmp &= ~0xff;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v2_5_mc_resume(vinst);
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (k = 0; k < 10; ++k) {
- uint32_t status;
-
- for (j = 0; j < 100; ++j) {
- status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
- if (status & 2)
- break;
- if (amdgpu_emu_mode == 1)
- msleep(500);
- else
- mdelay(10);
- }
- r = 0;
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (k = 0; k < 10; ++k) {
+ uint32_t status;
+
+ for (j = 0; j < 100; ++j) {
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
+ if (amdgpu_emu_mode == 1)
+ msleep(500);
+ else
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
- DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- r = -1;
- }
+ mdelay(10);
+ r = -1;
+ }
- if (r) {
- DRM_ERROR("VCN decode not responding, giving up!!!\n");
- return r;
- }
+ if (r) {
+ DRM_ERROR("VCN decode not responding, giving up!!!\n");
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
- ring = &adev->vcn.inst[i].ring_dec;
- /* force RBC into idle state */
- rb_bufsz = order_base_2(ring->ring_size);
- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
- fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* program the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
- ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
- lower_32_bits(ring->wptr));
- fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
- fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
- fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
-
- fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
- ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
- fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
- }
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
return 0;
}
@@ -1397,8 +1537,10 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
-static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1422,82 +1564,93 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v2_5_stop(struct amdgpu_device *adev)
+static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
- int i, r = 0;
+ int r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_5_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(vinst);
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* block LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* clear status */
- WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- vcn_v2_5_enable_clock_gating(adev);
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- /* enable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
- ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
- }
+ vcn_v2_5_enable_clock_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* enable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
- return 0;
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+
+ return r;
}
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code = 0;
@@ -1514,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -1643,11 +1796,12 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.insert_start = vcn_v2_0_dec_ring_insert_start,
.insert_end = vcn_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = vcn_v2_5_ring_begin_use,
+ .end_use = vcn_v2_5_ring_end_use,
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -1741,11 +1895,12 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v2_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = vcn_v2_5_ring_begin_use,
+ .end_use = vcn_v2_5_ring_end_use,
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1767,16 +1922,26 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
}
}
-static bool vcn_v2_5_is_idle(void *handle)
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = vcn_v2_5_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_5_start(vinst);
+}
+
+static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1811,40 +1976,45 @@ static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
{
struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
if (amdgpu_sriov_vf(adev))
return 0;
- if (enable) {
- if (!vcn_v2_5_is_idle(adev))
- return -EBUSY;
- vcn_v2_5_enable_clock_gating(adev);
- } else {
- vcn_v2_5_disable_clock_gating(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ if (enable) {
+ if (!vcn_v2_5_is_idle(ip_block))
+ return -EBUSY;
+ vcn_v2_5_enable_clock_gating(vinst);
+ } else {
+ vcn_v2_5_disable_clock_gating(vinst);
+ }
}
return 0;
}
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_device *adev = vinst->adev;
int ret;
if (amdgpu_sriov_vf(adev))
return 0;
- if(state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v2_5_stop(adev);
+ ret = vcn_v2_5_stop(vinst);
else
- ret = vcn_v2_5_start(adev);
+ ret = vcn_v2_5_start(vinst);
- if(!ret)
- adev->vcn.cur_state = state;
+ if (!ret)
+ vinst->cur_state = state;
return ret;
}
@@ -1921,74 +2091,14 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2001,9 +2111,9 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2018,9 +2128,9 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 63ddd4cca910..d9cf8f0feeb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -105,10 +106,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -124,11 +126,13 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
@@ -136,18 +140,27 @@ static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
/* both instances are harvested, disable the block */
return -ENOENT;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(3, 0, 33))
- adev->vcn.num_enc_rings = 0;
- else
- adev->vcn.num_enc_rings = 2;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(3, 0, 33))
+ adev->vcn.inst[i].num_enc_rings = 0;
+ else
+ adev->vcn.inst[i].num_enc_rings = 2;
+ }
}
vcn_v3_0_set_dec_ring_funcs(adev);
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+ return 0;
}
/**
@@ -162,20 +175,8 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/*
* Note: doorbell assignment is fixed for SRIOV multiple VCN engines
* Formula:
@@ -190,27 +191,37 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
+ adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+
+ adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);
/* VCN DEC TRAP */
@@ -224,7 +235,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
@@ -236,7 +247,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
/* VCN ENC TRAP */
@@ -248,7 +259,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[j];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
@@ -274,24 +285,30 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ adev->vcn.inst[i].reset = vcn_v3_0_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -310,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -325,14 +342,17 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- kfree(adev->vcn.ip_dump);
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
+ }
+
+ return 0;
}
/**
@@ -370,7 +390,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
ring->sched.ready = true;
}
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
ring->sched.ready = false;
@@ -398,7 +418,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -422,17 +442,19 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
- vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -449,15 +471,20 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -469,11 +496,14 @@ static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v3_0_hw_init(ip_block);
@@ -483,13 +513,14 @@ static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v3_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
@@ -538,8 +569,11 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -634,8 +668,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
-static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -685,8 +721,10 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}
-static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -733,13 +771,14 @@ static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v3_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Disable clock gating for VCN block
*/
-static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* VCN disable CGC */
@@ -866,9 +905,12 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -917,13 +959,14 @@ static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v3_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Enable clock gating for VCN block
*/
-static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* enable VCN CGC */
@@ -982,11 +1025,14 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1001,7 +1047,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1050,7 +1096,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -1079,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1131,197 +1182,203 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v3_0_start(struct amdgpu_device *adev)
+static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);
- /* disable VCN power gating */
- vcn_v3_0_disable_static_power_gating(adev, i);
+ /* disable VCN power gating */
+ vcn_v3_0_disable_static_power_gating(vinst);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
- /*SW clock gating */
- vcn_v3_0_disable_clock_gating(adev, i);
+ /* SW clock gating */
+ vcn_v3_0_disable_clock_gating(vinst);
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
-
- /* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v3_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v3_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
- return r;
- }
+ DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
+ return r;
+ }
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- ring = &adev->vcn.inst[i].ring_dec;
- /* force RBC into idle state */
- rb_bufsz = order_base_2(ring->ring_size);
- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
- /* programm the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
- lower_32_bits(ring->wptr));
- fw_shared->rb.wptr = lower_32_bits(ring->wptr);
- fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
- IP_VERSION(3, 0, 33)) {
- fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
- fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
- fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
- }
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
+ IP_VERSION(3, 0, 33)) {
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
return 0;
}
@@ -1434,7 +1491,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
ring->wptr = 0;
rb_addr = ring->gpu_addr;
@@ -1534,12 +1591,14 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v3_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
@@ -1562,90 +1621,101 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
-static int vcn_v3_0_stop(struct amdgpu_device *adev)
+static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v3_0_stop_dpg_mode(vinst);
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- /* clear status */
- WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+ /* apply HW clock gating */
+ vcn_v3_0_enable_clock_gating(vinst);
- /* apply HW clock gating */
- vcn_v3_0_enable_clock_gating(adev, i);
+ /* enable VCN power gating */
+ vcn_v3_0_enable_static_power_gating(vinst);
- /* enable VCN power gating */
- vcn_v3_0_enable_static_power_gating(adev, i);
- }
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1764,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
@@ -1814,15 +1884,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1928,11 +2002,11 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
val == 0) {
r = vcn_v3_0_dec_msg(p, job,
((u64)msg_hi) << 32 | msg_lo);
@@ -1972,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -2070,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2096,16 +2172,28 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[j].me = i;
}
}
}
-static bool vcn_v3_0_is_idle(void *handle)
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = vcn_v3_0_stop(vinst);
+ if (r)
+ return r;
+ vcn_v3_0_enable_clock_gating(vinst);
+ vcn_v3_0_enable_static_power_gating(vinst);
+ return vcn_v3_0_start(vinst);
+}
+
+static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2144,46 +2232,47 @@ static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v3_0_enable_clock_gating(adev, i);
+ vcn_v3_0_enable_clock_gating(vinst);
} else {
- vcn_v3_0_disable_clock_gating(adev, i);
+ vcn_v3_0_disable_clock_gating(vinst);
}
}
return 0;
}
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v3_0_stop(adev);
+ ret = vcn_v3_0_stop(vinst);
else
- ret = vcn_v3_0_start(adev);
+ ret = vcn_v3_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -2248,72 +2337,11 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2326,9 +2354,9 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.is_idle = vcn_v3_0_is_idle,
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
- .set_powergating_state = vcn_v3_0_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
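
vcn_v3_0_reset() above builds a per-instance reset out of the existing helpers: stop the instance, re-apply clock and static power gating, then start it again. A minimal stand-alone illustration of that stop/regate/start shape, with hypothetical names and none of the real register programming, might look like:

#include <stdio.h>

struct engine {
	int id;
	int running;
};

static int engine_stop(struct engine *e)
{
	printf("engine %d: stop\n", e->id);
	e->running = 0;
	return 0;
}

static void engine_enable_gating(struct engine *e)
{
	/* stand-in for re-enabling clock and static power gating */
	printf("engine %d: re-enable gating\n", e->id);
}

static int engine_start(struct engine *e)
{
	printf("engine %d: start\n", e->id);
	e->running = 1;
	return 0;
}

/* reset = stop, restore idle gating, start; abort on the first error */
static int engine_reset(struct engine *e)
{
	int r = engine_stop(e);

	if (r)
		return r;
	engine_enable_gating(e);
	return engine_start(e);
}

int main(void)
{
	struct engine e = { 0, 1 };

	return engine_reset(&e);
}
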
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 00551d6f0370..3ae666522d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -96,10 +97,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -114,7 +115,7 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int i;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
@@ -126,19 +127,28 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
}
}
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@@ -173,23 +183,21 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
-
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
if (i == 0)
atomic_set(&adev->vcn.inst[i].sched_score, 1);
@@ -211,7 +219,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i *
+ (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -223,11 +232,15 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
vcn_v4_0_fw_shared_init(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -235,21 +248,14 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -272,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -288,16 +294,18 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
- return r;
+ return 0;
}
/**
@@ -359,20 +367,23 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
@@ -387,15 +398,20 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -407,11 +423,14 @@ static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_hw_init(ip_block);
@@ -421,13 +440,14 @@ static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -481,14 +501,16 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
@@ -582,19 +604,21 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
* vcn_v4_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -653,13 +677,14 @@ static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -708,13 +733,14 @@ static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -819,16 +845,18 @@ static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -876,13 +904,14 @@ static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, u
/**
* vcn_v4_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -932,9 +961,11 @@ static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v4_0_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -957,17 +988,19 @@ static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
/**
* vcn_v4_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -982,7 +1015,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1030,7 +1063,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1042,7 +1075,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
- vcn_v4_0_enable_ras(adev, inst_idx, indirect);
+ vcn_v4_0_enable_ras(vinst, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1050,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1079,6 +1117,11 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
@@ -1086,183 +1129,184 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
/**
* vcn_v4_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_start(struct amdgpu_device *adev)
+static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_disable_clock_gating(vinst);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
return 0;
}
@@ -1310,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1518,17 +1562,18 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v4_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1543,92 +1588,101 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
* vcn_v4_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_stop_dpg_mode(vinst);
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* apply HW clock gating */
+ vcn_v4_0_enable_clock_gating(vinst);
- /* apply HW clock gating */
- vcn_v4_0_enable_clock_gating(adev, i);
+ /* enable VCN power gating */
+ vcn_v4_0_enable_static_power_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_enable_static_power_gating(adev, i);
- }
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
return 0;
}
@@ -1636,15 +1690,16 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
/**
* vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1746,15 +1801,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1845,22 +1904,16 @@ out:
#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
-
#define RADEON_VCN_ENGINE_INFO (0x30000001)
-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
-
#define RENCODE_ENCODE_STANDARD_AV1 2
#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
-/* return the offset in ib if id is found, -1 otherwise
- * to speed up the searching we only search upto max_offset
- */
-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+/* return the offset in ib if id is found, -1 otherwise */
+static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
{
int i;
- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+ for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
if (ib->ptr[i + 1] == id)
return i;
}
@@ -1875,42 +1928,56 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
- int idx;
+ int idx = 0, sidx;
/* The first instance can decode anything */
if (!ring->me)
return 0;
- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
- idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
- if (idx < 0) /* engine info is missing */
- return 0;
-
- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_dec_msg(p, job, addr);
- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
- idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
- return vcn_v4_0_limit_sched(p, job);
+ while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
+ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
+ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+ sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
+ if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+ return vcn_v4_0_limit_sched(p, job);
+ }
+ idx += ib->ptr[idx] / 4;
}
return 0;
}
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -1935,6 +2002,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_ring_reset,
};
/**
@@ -1964,13 +2032,13 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_is_idle(void *handle)
+static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2024,54 +2092,48 @@ static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_enable_clock_gating(adev, i);
+ vcn_v4_0_enable_clock_gating(vinst);
} else {
- vcn_v4_0_disable_clock_gating(adev, i);
+ vcn_v4_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_stop(adev);
+ ret = vcn_v4_0_stop(vinst);
else
- ret = vcn_v4_0_start(adev);
+ ret = vcn_v4_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -2163,75 +2225,14 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2244,9 +2245,9 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.is_idle = vcn_v4_0_is_idle,
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index ecdc027f8220..cb7123ec1a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "mmsch_v4_0_3.h"
#include "vcn/vcn_4_0_3_offset.h"
@@ -44,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
@@ -87,10 +89,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
@@ -98,8 +100,7 @@ static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
- return (amdgpu_sriov_vf(adev) ||
- (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
+ return (adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED)) == 0;
}
/**
@@ -112,15 +113,38 @@ static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_3_set_unified_ring_funcs(adev);
vcn_v4_0_3_set_irq_funcs(adev);
vcn_v4_0_3_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_3_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
}
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -149,18 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
-
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -168,7 +180,22 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -185,18 +212,21 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+
+ /* There are no per-instance irq source IDs on 4.0.3, the IH
+ * packets use a separate field to differentiate instances.
+ */
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
return r;
vcn_v4_0_3_fw_shared_init(adev, i);
- }
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
+ }
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -204,9 +234,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
-
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
r = amdgpu_vcn_ras_sw_init(adev);
if (r) {
@@ -215,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -245,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -257,16 +275,43 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
- return r;
+ return 0;
+}
+
+static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
+{
+ int vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_ring *ring;
+ int inst_idx = vinst->inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
+ adev->vcn.inst[inst_idx].aid_id);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ }
+
+ return 0;
}
/**
@@ -280,7 +325,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ struct amdgpu_vcn_inst *vinst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
@@ -295,31 +341,17 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
ring->sched.ready = true;
}
} else {
+ /* This flag is not set for VF, assumed to be disabled always */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
+ 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
+ vinst = &adev->vcn.inst[i];
+ vcn_v4_0_3_hw_init_inst(vinst);
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
@@ -345,11 +377,19 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
- if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ cancel_delayed_work_sync(&vinst->idle_work);
+
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
}
@@ -363,15 +403,20 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -383,11 +428,14 @@ static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_3_hw_init(ip_block);
@@ -397,13 +445,14 @@ static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_3_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -471,14 +520,16 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -585,13 +636,14 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -678,16 +730,18 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -729,13 +783,14 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -780,18 +835,20 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_
/**
* vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -815,7 +872,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
}
/* enable clock gating */
- vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -865,7 +922,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -884,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -917,6 +979,11 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/*resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -945,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1112,191 +1179,185 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_3_start(struct amdgpu_device *adev)
+static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
+ UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
- vcn_inst = GET_INST(VCN, i);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
- UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_3_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK,
- ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
- tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_3_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* SW clock gating */
+ vcn_v4_0_3_disable_clock_gating(vinst);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK,
+ ~UVD_VCPU_CNTL__CLK_EN_MASK);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst,
- regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- DRM_DEV_ERROR(adev->dev,
- "VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
+ tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_3_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst,
+ regUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
- return r;
- }
+ DRM_DEV_ERROR(adev->dev,
+ "VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
+ return r;
+ }
- ring = &adev->vcn.inst[i].ring_enc[0];
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* program the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
- upper_32_bits(ring->gpu_addr));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
- ring->ring_size / sizeof(uint32_t));
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- /* resetting ring, fw should not check RB ring */
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+ upper_32_bits(ring->gpu_addr));
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+ ring->ring_size / sizeof(uint32_t));
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ /* resetting ring, fw should not check RB ring */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- fw_shared->sq.queue_mode &=
- cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ fw_shared->sq.queue_mode &=
+ cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
- }
return 0;
}
/**
* vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
@@ -1316,108 +1377,114 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
/**
* vcn_v4_0_3_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- int i, r = 0, vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ int r = 0, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_3_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_3_stop_dpg_mode(vinst);
+ goto Done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
- UVD_STATUS__IDLE, 0x7);
- if (r)
- goto Done;
-
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
-
- /* stall UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
+ UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto Done;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
+
+ /* stall UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
- /* Unblock VCPU Register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* Unblock VCPU Register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* reset LMI UMC/LMI/VCPU */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ /* reset LMI UMC/LMI/VCPU */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- /* clear VCN status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ /* clear VCN status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_3_enable_clock_gating(adev, i);
- }
-Done:
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* apply HW clock gating */
+ vcn_v4_0_3_enable_clock_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+Done:
return 0;
}
/**
* vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
return 0;
@@ -1461,8 +1528,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
regUVD_RB_WPTR);
}
-static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1474,7 +1541,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1485,8 +1553,8 @@ static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -1498,7 +1566,7 @@ static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
lower_32_bits(pd_addr), 0xffffffff);
}
-static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through VCN ring.
@@ -1528,6 +1596,34 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ /* This flag is not set for VF; RRMT is assumed to be always disabled there */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ vcn_v4_0_3_hw_init_inst(vinst);
+ vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
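For clarity, the instance mask handed to amdgpu_dpm_reset_vcn() above addresses physical engines: GET_INST() maps the logical index stored in ring->me onto the physical VCN instance, so only the hung engine should be reset while the other instances keep running. A worked example with hypothetical values:

    /* Hypothetical mapping: logical instance 1 -> physical instance 1 */
    vcn_inst = GET_INST(VCN, 1);                    /* yields 1 in this example */
    r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);  /* mask 0x2: reset VCN1 only */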
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1556,6 +1652,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_3_ring_reset,
};
/**
@@ -1581,13 +1678,13 @@ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_3_is_idle(void *handle)
+static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1635,51 +1732,45 @@ static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i),
regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_3_enable_clock_gating(adev, i);
+ vcn_v4_0_3_enable_clock_gating(vinst);
} else {
- vcn_v4_0_3_disable_clock_gating(adev, i);
+ vcn_v4_0_3_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_3_stop(adev);
+ ret = vcn_v4_0_3_stop(vinst);
else
- ret = vcn_v4_0_3_start(adev);
+ ret = vcn_v4_0_3_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
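Powergating control now lives per instance: each VCN instance carries a set_pg_state callback installed at early_init, and the IP-funcs table below switches to the shared vcn_set_powergating_state helper. A minimal sketch of what such a dispatcher could look like (an assumption about the helper's shape, not the actual amdgpu_vcn.c code):

    static int example_vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                                 enum amd_powergating_state state)
    {
            struct amdgpu_device *adev = ip_block->adev;
            int i, r, ret = 0;

            for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                    struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

                    /* dispatch to the per-instance hook set up in early_init */
                    r = vinst->set_pg_state(vinst, state);
                    if (r)
                            ret = r;
            }

            return ret;
    }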
@@ -1745,11 +1836,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
.set = vcn_v4_0_3_set_interrupt_state,
.process = vcn_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+ .set = vcn_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1765,73 +1869,15 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.num_types++;
}
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
-}
-
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1841,9 +1887,9 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.is_idle = vcn_v4_0_3_is_idle,
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_3_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
@@ -1912,9 +1958,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+ .query_poison_status = vcn_v4_0_3_query_poison_status,
};
static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -1931,11 +2012,13 @@ static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
@@ -1988,6 +2071,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
&vcn_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 0b046114373a..aeab89853a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -24,6 +24,21 @@
#ifndef __VCN_V4_0_3_H__
#define __VCN_V4_0_3_H__
+enum amdgpu_vcn_v4_0_3_sub_block {
+ AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val);
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
+
#endif /* __VCN_V4_0_3_H__ */
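The emit helpers above lose their static qualifier and gain prototypes in this header, presumably so a sibling VCN variant that shares the 4.0.3 register layout can reuse them. A hedged illustration of such a consumer (the table name is hypothetical and most callbacks are omitted):

    static const struct amdgpu_ring_funcs example_vcn_unified_ring_vm_funcs = {
            .type = AMDGPU_RING_TYPE_VCN_ENC,
            .align_mask = 0x3f,
            .nop = VCN_ENC_CMD_NO_OP,
            .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
            .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
            .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
            .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
            /* get_rptr/get_wptr/emit_ib and the remaining callbacks omitted */
    };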
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 23d3c16c9d9f..b107ee80e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -95,10 +96,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -112,13 +113,26 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
+ adev->vcn.per_inst_fw = true;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_5_set_unified_ring_funcs(adev);
vcn_v4_0_5_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -133,25 +147,23 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -170,7 +182,7 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- i * (adev->vcn.num_enc_rings + 1) + 1;
+ i * (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
2 + 8 * i;
@@ -193,28 +205,34 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
+ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ return r;
}
/**
@@ -231,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -247,15 +265,15 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
-
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, i);
+ }
- return r;
+ return 0;
}
/**
@@ -300,16 +318,19 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -326,13 +347,18 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -346,11 +372,14 @@ static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_5_hw_init(ip_block);
@@ -360,13 +389,14 @@ static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -420,14 +450,16 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -534,13 +566,14 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -593,13 +626,14 @@ static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -635,13 +669,14 @@ static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v4_0_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -746,16 +781,18 @@ static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -803,13 +840,14 @@ static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -862,17 +900,20 @@ static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -888,7 +929,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_5_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_5_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -936,7 +977,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_5_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -953,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -982,6 +1028,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
+
return 0;
}
@@ -989,184 +1039,184 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v4_0_5_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_5_start(struct amdgpu_device *adev)
+static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_5_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_5_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_5_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_5_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_5_disable_clock_gating(vinst);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_5_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
return 0;
}
@@ -1174,13 +1224,14 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1197,108 +1248,119 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
* vcn_v4_0_5_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_5_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_5_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* apply HW clock gating */
+ vcn_v4_0_5_enable_clock_gating(vinst);
- /* apply HW clock gating */
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ /* enable VCN power gating */
+ vcn_v4_0_5_enable_static_power_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_5_enable_static_power_gating(adev, i);
- }
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
/**
* vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1397,7 +1459,25 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_5_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_5_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
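Unlike the 4.0.3 variant, which hands the reset to the SMU, this callback recovers the queue by running the instance through its normal stop/start sequence between the two reset helpers. As a hedged usage sketch (a hypothetical debug helper, not part of this patch), the callback is reachable through the ring's funcs table once per-queue reset is advertised in supported_reset:

    /* Hypothetical helper: force a per-queue reset on a unified VCN ring.
     * Passes a NULL timed-out fence since there is no guilty job to propagate. */
    static int example_vcn_force_queue_reset(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;

            if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
                    return -EOPNOTSUPP;

            return ring->funcs->reset(ring, 0 /* vmid */, NULL);
    }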
+static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
@@ -1424,6 +1504,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_5_ring_reset,
};
/**
@@ -1441,6 +1522,9 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5))
+ vcn_v4_0_5_unified_ring_vm_funcs.secure_submission_supported = true;
+
adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
}
@@ -1449,13 +1533,13 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_5_is_idle(void *handle)
+static bool vcn_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1505,49 +1589,42 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ vcn_v4_0_5_enable_clock_gating(vinst);
} else {
- vcn_v4_0_5_disable_clock_gating(adev, i);
+ vcn_v4_0_5_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_5_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_5_stop(adev);
+ ret = vcn_v4_0_5_stop(vinst);
else
- ret = vcn_v4_0_5_start(adev);
+ ret = vcn_v4_0_5_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1615,72 +1692,11 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs;
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1693,9 +1709,9 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.is_idle = vcn_v4_0_5_is_idle,
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_5_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b6d78381ebfb..0202df5db1e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -78,10 +78,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -95,29 +95,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_0_set_unified_ring_funcs(adev);
vcn_v5_0_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
-}
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
}
+
+ return 0;
}
/**
@@ -133,22 +128,22 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -181,16 +176,19 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
-
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -213,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -226,16 +224,18 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
- return r;
+ return 0;
}
/**
@@ -280,16 +280,19 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -306,13 +309,18 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -326,11 +334,14 @@ static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_0_hw_init(ip_block);
@@ -340,13 +351,14 @@ static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -400,14 +412,16 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -502,7 +516,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
return;
}
@@ -510,13 +525,14 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -577,13 +593,14 @@ static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -623,12 +640,11 @@ static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
@@ -637,15 +653,15 @@ static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
return;
}
@@ -654,12 +670,11 @@ static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
@@ -667,17 +682,20 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -714,7 +732,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -731,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -760,161 +783,167 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
/**
* vcn_v5_0_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_0_start(struct amdgpu_device *adev)
+static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v5_0_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v5_0_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
return 0;
}
@@ -922,17 +951,18 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v5_0_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
@@ -946,106 +976,117 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return;
}
/**
* vcn_v5_0_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_0_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
-
- /* enable VCN power gating */
- vcn_v5_0_0_enable_static_power_gating(adev, i);
- }
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- return 0;
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+
+ /* enable VCN power gating */
+ vcn_v5_0_0_enable_static_power_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+
+ return r;
}
/**
* vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1140,6 +1181,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v5_0_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v5_0_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1167,6 +1226,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_0_ring_reset,
};
/**
@@ -1192,13 +1252,13 @@ static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_0_is_idle(void *handle)
+static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1248,49 +1308,42 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_0_enable_clock_gating(adev, i);
+ vcn_v5_0_0_enable_clock_gating(vinst);
} else {
- vcn_v5_0_0_disable_clock_gating(adev, i);
+ vcn_v5_0_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_0_stop(adev);
+ ret = vcn_v5_0_0_stop(vinst);
else
- ret = vcn_v5_0_0_start(adev);
+ ret = vcn_v5_0_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1358,72 +1411,11 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1436,9 +1428,9 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.is_idle = vcn_v5_0_0_is_idle,
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 8b463c977d08..8bd457dea4cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -29,6 +29,8 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -38,12 +40,47 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
-
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v5_0_1_early_init - set function pointers and load microcode
*
@@ -55,14 +92,62 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
+ vcn_v5_0_1_set_ras_funcs(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ if ((adev->psp.sos.fw_version >= 0x00450025) &&
+ amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
- return amdgpu_vcn_early_init(adev);
+ return 0;
+}
+
+static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+
+ if (fw_shared->sq.is_enabled)
+ return;
+ fw_shared->present_flag_0 =
+ cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
}
/**
@@ -78,30 +163,39 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -111,19 +205,26 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
+ vcn_v5_0_1_fw_shared_init(adev, i);
+ }
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -142,7 +243,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -152,17 +253,38 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
- return r;
+ return 0;
+}
+
+static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ return 0;
}
/**
@@ -176,21 +298,34 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst),
- adev->vcn.inst[i].aid_id);
+ int i, r;
- r = amdgpu_ring_test_helper(ring);
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v5_0_1_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ vcn_v5_0_1_hw_init_inst(adev, i);
+
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
return 0;
@@ -206,8 +341,18 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
}
@@ -222,13 +367,17 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
r = vcn_v5_0_1_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -243,11 +392,18 @@ static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ if (amdgpu_in_reset(adev))
+ vinst->cur_state = AMD_PG_STATE_GATE;
+
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_1_hw_init(ip_block);
@@ -257,13 +413,14 @@ static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_1_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -313,20 +470,22 @@ static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
/**
* vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -421,7 +580,7 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
@@ -431,42 +590,89 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
+ * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @vinst: VCN instance
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
+{
+ struct amdgpu_device *adev = vinst->adev;
+ uint32_t reg_data = 0;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, vinst->inst);
+
+ /* pause/unpause if state is changed */
+ if (vinst->pause_state.fw_based != new_state->fw_based) {
+ DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
+ vinst->pause_state.fw_based, new_state->fw_based,
+ new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
+ reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ } else {
+ /* unpause DPG, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+ }
+ vinst->pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+
+/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -510,7 +716,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -527,8 +733,19 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* resetting ring, fw should not check RB ring */
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
+ /* Pause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -539,7 +756,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
@@ -550,6 +767,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
@@ -561,156 +779,345 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
return 0;
}
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V5_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
/**
* vcn_v5_0_1_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ vcn_inst = GET_INST(VCN, i);
- vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_1_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(100);
- if (amdgpu_emu_mode == 1)
- msleep(20);
- }
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ vcn_v5_0_1_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@@ -718,18 +1125,23 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
vcn_inst = GET_INST(VCN, inst_idx);
+ /* Unpause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -741,83 +1153,90 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
}
/**
* vcn_v5_0_1_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0, vcn_inst;
+ int r = 0, vcn_inst;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_1_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(vinst);
+ return 0;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
- }
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@@ -882,6 +1301,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ vcn_v5_0_1_hw_init_inst(adev, ring->me);
+ vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -889,16 +1333,17 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
- 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
- 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
- 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 +
+ 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+ .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_unified_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -906,9 +1351,10 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_1_ring_reset,
};
/**
@@ -933,13 +1379,13 @@ static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_1_is_idle(void *handle)
+static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
@@ -986,42 +1432,45 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_1_enable_clock_gating(adev, i);
+ vcn_v5_0_1_enable_clock_gating(vinst);
} else {
- vcn_v5_0_1_disable_clock_gating(adev, i);
+ vcn_v5_0_1_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
+
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_1_stop(adev);
+ ret = vcn_v5_0_1_stop(vinst);
else
- ret = vcn_v5_0_1_start(adev);
+ ret = vcn_v5_0_1_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1067,10 +1516,24 @@ static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgp
return 0;
}
+static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
.process = vcn_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
+ .set = vcn_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
+
/**
* vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1084,13 +1547,18 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
adev->vcn.inst->irq.num_types++;
+
adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
+
}
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.name = "vcn_v5_0_1",
.early_init = vcn_v5_0_1_early_init,
- .late_init = NULL,
+ .late_init = vcn_v5_0_1_late_init,
.sw_init = vcn_v5_0_1_sw_init,
.sw_fini = vcn_v5_0_1_sw_fini,
.hw_init = vcn_v5_0_1_hw_init,
@@ -1104,9 +1572,9 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_1_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .set_powergating_state = vcn_set_powergating_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1116,3 +1584,146 @@ const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
.rev = 1,
.funcs = &vcn_v5_0_1_ip_funcs,
};
+
+static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
+ .query_poison_status = vcn_v5_0_1_query_poison_status,
+};
+
+static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* error codes referenced from the SMU driver interface (i/f) header file */
+static int vcn_v5_0_1_err_codes[] = {
+ 14, 15, 47, /* VCN [D|V|S] */
+};
+
+static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v5_0_1_err_codes,
+ ARRAY_SIZE(vcn_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v5_0_1_aca_bank_ops,
+};
+
+static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v5_0_1_ras_hw_ops,
+ .ras_late_init = vcn_v5_0_1_ras_late_init,
+ },
+};
+
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ras = &vcn_v5_0_1_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 82ac709f44bf..b72e4da68317 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -24,6 +24,16 @@
#ifndef __VCN_v5_0_1_H__
#define __VCN_v5_0_1_H__
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+
+enum amdgpu_vcn_v5_0_1_sub_block {
+ AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 98fc6941159e..eb16916c6473 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -555,7 +555,7 @@ static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
return vega10_ih_hw_init(ip_block);
}
-static bool vega10_ih_is_idle(void *handle)
+static bool vega10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index e9e3b2ed4b7b..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
if (!amdgpu_sriov_vf(adev))
@@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
@@ -651,7 +655,7 @@ static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
return vega20_ih_hw_init(ip_block);
}
-static bool vega20_ih_is_idle(void *handle)
+static bool vega20_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 06615f160331..a611a7345125 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@@ -167,16 +166,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +187,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +205,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +226,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -1736,7 +1735,7 @@ static int vi_common_resume(struct amdgpu_ip_block *ip_block)
return vi_common_hw_init(ip_block);
}
-static bool vi_common_is_idle(void *handle)
+static bool vi_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1994,9 +1993,9 @@ static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void vi_common_get_clockgating_state(void *handle, u64 *flags)
+static void vi_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index d3c3d3ab7225..16e12c9913f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 0d3d8972240d..0ce08113c9f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -27,7 +27,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_device.o \
$(AMDKFD_PATH)/kfd_chardev.o \
$(AMDKFD_PATH)/kfd_topology.o \
- $(AMDKFD_PATH)/kfd_pasid.o \
$(AMDKFD_PATH)/kfd_doorbell.o \
$(AMDKFD_PATH)/kfd_flat_memory.o \
$(AMDKFD_PATH)/kfd_process.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 795382b55e0a..73acbe0b7c21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -91,7 +91,6 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
- unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
@@ -107,20 +106,26 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
kfd_signal_hw_exception_event(pasid);
else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_process_device *pdd = NULL;
struct kfd_vm_fault_info info;
+ struct kfd_process *p;
kfd_smi_event_update_vmfault(dev, pasid);
- kfd_dqm_evict_pasid(dev->dqm, pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
+ if (!pdd)
+ return;
+
+ kfd_evict_process_device(pdd);
memset(&info, 0, sizeof(info));
amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
- if (!info.page_addr && !info.status)
+ if (!info.page_addr && !info.status) {
+ kfd_unref_process(p);
return;
+ }
- if (info.vmid == vmid)
- kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
- else
- kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
+ kfd_signal_vm_fault_event(pdd, &info, NULL);
+ kfd_unref_process(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 388b44ed5928..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,7 +274,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820258,
+ 0xbf820001, 0xbf820259,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -390,141 +390,98 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840063, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf84005f, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
- 0xd2890000, 0x00000900,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
+ 0x00000903, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
- 0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -544,137 +501,181 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200c7,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200c7, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1302,7 +1303,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202d4,
+ 0xbf820001, 0xbf8202d5,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1419,99 +1420,37 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1530,31 +1469,50 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1574,215 +1532,259 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbefc0080, 0xbf11017c,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf8200e3,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200e3, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbefc0080,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2a05, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202df,
+ 0xbf820001, 0xbf8202e0,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1899,99 +1901,37 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2010,31 +1950,50 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -2054,51 +2013,31 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2137,139 +2076,203 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -3151,7 +3154,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
- 0xbf820001, 0xbf8202db,
+ 0xbf820001, 0xbf8202dc,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -3266,99 +3269,37 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3377,31 +3318,50 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -3421,51 +3381,31 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3504,143 +3444,207 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b79,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3714,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3751,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3797,354 +3817,380 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
- 0xbf820001, 0xbf8202d8,
+ 0xbf820001, 0xbf8202ca,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840008, 0xbf0d986d,
- 0xbf85001f, 0x866eff7b,
- 0x00000400, 0xbf850061,
- 0xbf8e0010, 0xb8fbf803,
- 0xbf82fffa, 0x866eff7b,
- 0x03800900, 0xbf850015,
- 0x866eff7b, 0x000071ff,
- 0xbf840008, 0x866fff7b,
- 0x00007080, 0xbf840001,
- 0xbeee1a87, 0xb8eff801,
- 0x8e6e8c6e, 0x866e6f6e,
- 0xbf85000a, 0xbf0d986d,
- 0xbf850003, 0x866eff6d,
- 0x00ff0000, 0xbf850005,
- 0xbf0d986d, 0xbf850004,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001a,
0x866eff7b, 0x00000400,
- 0xbf850046, 0xbeed1a9d,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x03c00900,
+ 0xbf850011, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf850006,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf85003a,
0xb8faf807, 0x867aff7a,
0x001f8000, 0x8e7a8b7a,
0x8979ff79, 0xfc000000,
@@ -4153,100 +4199,95 @@ static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
0xb8fbf813, 0x8efa887a,
0xbf0d8f7b, 0xbf840002,
0x877bff7b, 0xffff0000,
- 0xc0031cfd, 0x00000010,
- 0xc0071bbd, 0x00000000,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8979ff79, 0x00800000,
+ 0x87796e79, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
0xc0071ebd, 0x00000008,
- 0xbf8cc07f, 0x8e739773,
- 0x8979ff79, 0x01800000,
- 0x87797379, 0xbf0d986d,
- 0xbf840009, 0xbf0d9879,
- 0xbf850007, 0x896dff6d,
- 0x01ff0000, 0xba7f0583,
- 0x00000000, 0xbf0d9d6d,
- 0xbeed189d, 0xbf840012,
- 0xbef91898, 0xbeed189d,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0x866eff6d,
- 0x01ff0000, 0xbf850005,
- 0x8778ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbf820005, 0x866eff6d,
- 0x01000000, 0xbf850002,
- 0x806c846c, 0x826d806d,
- 0x866dff6d, 0x0000ffff,
- 0x8f7a8b79, 0x867aff7a,
- 0x001f8000, 0xb97af807,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e8378, 0xb96ee0c2,
- 0xbf800002, 0xb9780002,
- 0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbefa0080,
- 0xb97a0283, 0xb8faf807,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f7a8b79,
0x867aff7a, 0x001f8000,
- 0x8e7a8b7a, 0x8979ff79,
- 0xfc000000, 0x87797a79,
- 0xba7ff807, 0x00000000,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2985, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0xbef1007c,
- 0xbef00080, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0xb97af807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbf108080,
0x867aff7f, 0x04000000,
0xbeef0080, 0x876f6f7a,
0xb8f02985, 0x80708170,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 44772eec9ef4..96fbb16ceb21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -34,41 +34,24 @@
* cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
* sp3 gfx11.sp3 -hex gfx11.hex
*
- * gfx12:
- * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
- * sp3 gfx12.sp3 -hex gfx12.hex
*/
#define CHIP_NAVI10 26
#define CHIP_SIENNA_CICHLID 30
#define CHIP_PLUM_BONITO 36
-#define CHIP_GFX12 37
#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
+#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
#define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-#if ASIC_FAMILY < CHIP_GFX12
#define S_COHERENCE glc:1
#define V_COHERENCE slc:1 glc:1
#define S_WAITCNT_0 s_waitcnt 0
-#else
-#define S_COHERENCE scope:SCOPE_SYS
-#define V_COHERENCE scope:SCOPE_SYS
-#define S_WAITCNT_0 s_wait_idle
-
-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
-#define HW_REG_MODE HW_REG_WAVE_MODE
-#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
var S_STATUS_HALT_MASK = SQ_WAVE_STATUS_HALT_MASK
var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
var S_SAVE_PC_HI_HT_MASK = 0x01000000
-#else
-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
-var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
-var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
-var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
-var SQ_WAVE_STATUS_WAVE64_SIZE = 1
-var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
-var S_STATUS_HWREG = HW_REG_WAVE_STATE_PRIV
-var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
-var S_STATUS_HALT_MASK = SQ_WAVE_STATE_PRIV_HALT_MASK
-var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
-#endif
var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
var S_TRAPSTS_HWREG = HW_REG_TRAPSTS
var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK
var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
-#else
-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
-
-var S_TRAPSTS_HWREG = HW_REG_WAVE_EXCP_FLAG_PRIV
-var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
-var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
-var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
-var BARRIER_STATE_SIGNAL_OFFSET = 16
-var BARRIER_STATE_VALID_OFFSET = 0
-#endif
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
L_HALTED:
// Host trap may occur while wave is halted.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
-#else
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
L_CHECK_SAVE:
@@ -336,7 +266,6 @@ L_NOT_HALTED:
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
// Maskable exceptions only cause the wave to enter the trap handler if
// their respective bit in mode.excp_en is set.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
s_cbranch_scc0 L_CHECK_TRAP_ID
@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
s_and_b32 ttmp2, ttmp2, ttmp3
s_cbranch_scc1 L_FETCH_2ND_TRAP
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
- s_cbranch_scc0 L_NOT_ADDR_WATCH
- s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
-
-L_NOT_ADDR_WATCH:
- s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
- s_and_b32 ttmp2, ttmp3, ttmp2
- s_cbranch_scc1 L_FETCH_2ND_TRAP
-#endif
L_CHECK_TRAP_ID:
// Check trap_id != 0
@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
#if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-#else
- // WAVE_TRAP_CTRL is already in ttmp3.
- s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
#endif
@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
s_cbranch_scc1 L_TRAP_CASE
// Host trap will not cause trap re-entry.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_EXIT_TRAP
s_or_b32 s_save_status, s_save_status, S_STATUS_HALT_MASK
@@ -457,16 +365,7 @@ L_EXIT_TRAP:
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
-#if ASIC_FAMILY < CHIP_GFX12
s_setreg_b32 hwreg(S_STATUS_HWREG), s_save_status
-#else
- // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
- // Only restore fields which the trap handler changes.
- s_lshr_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
- s_setreg_b32 hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
- SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
-#endif
-
s_rfe_b64 [ttmp0, ttmp1]
L_SAVE:
@@ -478,14 +377,6 @@ L_SAVE:
s_endpgm
L_HAVE_VGPRS:
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
- s_cbranch_scc0 L_HAVE_VGPRS
- s_endpgm
-L_HAVE_VGPRS:
-#endif
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -671,19 +562,6 @@ L_SAVE_HWREG:
s_mov_b32 m0, 0x0 //Next lane of v2 to write to
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- // Ensure no further changes to barrier or LDS state.
- // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
- s_barrier_signal -2
- s_barrier_wait -2
-
- // Re-read final state of BARRIER_COMPLETE field for save.
- s_getreg_b32 s_save_tmp, hwreg(S_STATUS_HWREG)
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_or_b32 s_save_status, s_save_status, s_save_tmp
-#endif
-
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
@@ -707,21 +585,6 @@ L_SAVE_HWREG:
s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_get_barrier_state s_save_tmp, -1
- s_wait_kmcnt (0)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-#endif
-
#if NO_SQC_STORE
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //LDS is used? wait for other waves in the same TG
-#endif
s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
@@ -1081,11 +942,6 @@ L_RESTORE:
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
-#if ASIC_FAMILY >= CHIP_GFX12
- // Save s_restore_spi_init_hi for later use.
- s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
-#endif
-
//determine it is wave32 or wave64
get_wave_size2(s_restore_size)
@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
// Clear DEBUG_EN before and restore MODE after the barrier.
s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG
-#endif
/* restore HW registers */
L_RESTORE_HWREG:
@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
-#if ASIC_FAMILY >= CHIP_GFX12
- // Restore s_restore_spi_init_hi before the saved value gets clobbered.
- s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
-#endif
-
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
-#if ASIC_FAMILY >= CHIP_GFX12
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
-
- // Only the first wave needs to restore the workgroup barrier.
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // Skip over WAVE_STATUS, since there is no state to restore from it
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
-
- s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // extract the saved signal count from s_restore_tmp
- s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
-
- // We need to call s_barrier_signal repeatedly to restore the signal
- // count of the work group barrier. The member count is already
- // initialized with the number of waves in the work group.
-L_BARRIER_RESTORE_LOOP:
- s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
- s_barrier_signal -1
- s_add_i32 s_restore_tmp, s_restore_tmp, -1
- s_branch L_BARRIER_RESTORE_LOOP
-
-L_SKIP_BARRIER_RESTORE:
-#endif
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
s_setreg_b32 hwreg(S_STATUS_HWREG), s_restore_status // SCC is included, which is changed by previous salu
-#if ASIC_FAMILY >= CHIP_GFX12
- // Make barrier and LDS state visible to all waves in the group.
- // STATE_PRIV.BARRIER_COMPLETE may change after this point.
- s_barrier_signal -2
- s_barrier_wait -2
-#endif
-
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
end
function get_wave_size2(s_reg)
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
-#else
- s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
-#endif
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
new file mode 100644
index 000000000000..5a1a1b1f897f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -0,0 +1,1136 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* To compile this assembly code:
+ *
+ * gfx12:
+ * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
+ * sp3 gfx12.sp3 -hex gfx12.hex
+ */
+
+#define CHIP_GFX12 37
+
+#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
+
+var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
+var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
+var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
+var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
+var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
+var SQ_WAVE_STATUS_WAVE64_SIZE = 1
+var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
+var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
+
+var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
+var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
+var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
+var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
+
+var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
+var BARRIER_STATE_SIGNAL_OFFSET = 16
+var BARRIER_STATE_VALID_OFFSET = 0
+
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_state_priv = ttmp12
+var s_save_excp_flag_priv = ttmp15
+var s_save_xnack_mask = s_save_excp_flag_priv
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp4
+var s_save_alloc_size = s_save_excp_flag_priv
+var s_save_tmp = ttmp14
+var s_save_m0 = ttmp5
+var s_save_ttmps_lo = s_save_tmp
+var s_save_ttmps_hi = s_save_excp_flag_priv
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp2
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = s_restore_tmp
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp4
+var s_restore_exec_hi = ttmp5
+var s_restore_state_priv = ttmp14
+var s_restore_excp_flag_priv = ttmp15
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp6
+var s_restore_ttmps_lo = s_restore_tmp
+var s_restore_ttmps_hi = s_restore_alloc_size
+var s_restore_spi_init_hi_save = s_restore_exec_hi
+
+shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
+
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+L_SKIP_RESTORE:
+ s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+
+ s_and_b32 ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ s_and_b32 ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ s_and_b32 ttmp2, ttmp3, ttmp2
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+#if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ // WAVE_TRAP_CTRL is already in ttmp3.
+ s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+#endif
+
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+L_FETCH_2ND_TRAP:
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+ // ttmp12 holds SQ_WAVE_STATUS
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_wait_idle
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS // second-level TBA
+ s_wait_idle
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS // second-level TMA
+ s_wait_idle
+
+ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+ s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_TRAP_CASE
+
+ // Host trap will not cause trap re-entry.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+ s_or_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
+ // If VGPRs have been deallocated then terminate the wavefront.
+ // It has no remaining program to run and cannot save without VGPRs.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+ s_cbranch_scc0 L_HAVE_VGPRS
+ s_endpgm
+L_HAVE_VGPRS:
+
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ /* inform SPI the readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+ s_wait_idle
+
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_wave_size2(s_save_ttmps_hi)
+ get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+ s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+ s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
+
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
+
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
+
+	/* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+	s_mov_b32	s_save_buf_rsrc2, 0	//NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ get_wave_size2(s_wave_size)
+
+ /* save first 4 VGPRs, needed for SGPR save */
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+ s_branch L_SAVE_HWREG
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
+ v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
+ v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
+
+ // Ensure no further changes to barrier or LDS state.
+ // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ // Re-read final state of BARRIER_COMPLETE field for save.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
+
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ v_writelane_b32 v2, s_save_tmp, 0x6
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ v_writelane_b32 v2, s_save_tmp, 0xD
+
+ s_get_barrier_state s_save_tmp, -1
+ s_wait_kmcnt (0)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
+
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
+ s_mov_b32 exec_hi, 0x0
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+
+ /* save SGPRs */
+ // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 ttmp13, 0x0 //next VGPR lane to copy SGPR into
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
+ s_mov_b32 ttmp13, 0x0
+ v_mov_b32 v2, 0x0
+L_SAVE_SGPR_SKIP_TCP_STORE:
+
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete?
+
+ //save the rest 12 SGPR
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_v2(s0)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
+
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+ // first wave do LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ //load 0~63*4(byte address) to vgpr v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ s_add_u32 m0, m0, s3 //every buffer_store_lds does 128 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
+
+L_SAVE_LDS_DONE:
+ /* save VGPRs - set the Rest VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+ s_mov_b32 s_save_mem_offset, (0+128*4) // for the rest VGPRs
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+ s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+L_SAVE_SHARED_VGPR:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_SAVE_VGPR_END //no shared_vgpr used? jump to L_SAVE_LDS
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+ //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
+ //save shared_vgpr will start from the index of m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
+
+L_RESTORE:
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+
+ // Save s_restore_spi_init_hi for later use.
+ s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+
+ //determine it is wave32 or wave64
+ get_wave_size2(s_restore_size)
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+ s_add_u32 m0, m0, 128 // 128 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+ s_add_u32 m0, m0, 256 // 256 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+L_RESTORE_SHARED_VGPR:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+ //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
+ //restore shared_vgpr will start from the index of m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+ s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+
+ /* restore SGPRs */
+ //will be 2+8+16*6
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 20*4 //s108~s127 is not saved
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+	s_cmp_eq_u32	m0, 0							//scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ // s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ // Clear DEBUG_EN before and restore MODE after the barrier.
+ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // Restore s_restore_spi_init_hi before the saved value gets clobbered.
+ s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+
+ // Only the first wave needs to restore the workgroup barrier.
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // Skip over WAVE_STATUS, since there is no state to restore from it
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // extract the saved signal count from s_restore_tmp
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+
+ // We need to call s_barrier_signal repeatedly to restore the signal
+ // count of the work group barrier. The member count is already
+ // initialized with the number of waves in the work group.
+L_BARRIER_RESTORE_LOOP:
+ s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+ s_barrier_signal -1
+ s_add_i32 s_restore_tmp, s_restore_tmp, -1
+ s_branch L_BARRIER_RESTORE_LOOP
+
+L_SKIP_BARRIER_RESTORE:
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
+ // Only restore the other fields to avoid clobbering them.
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_MODE), s_restore_mode
+
+ // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+ s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
+ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
+ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
+ s_wait_idle
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu
+
+ // Make barrier and LDS state visible to all waves in the group.
+ // STATE_PRIV.BARRIER_COMPLETE may change after this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
+ // Make sure that no wave of the workgroup can exit the trap handler
+ // before the workgroup barrier state is saved.
+ s_barrier_signal -2
+ s_barrier_wait -2
+ s_endpgm_saved
+end
+
+function write_16sgpr_to_v2(s, lane_offset)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
+ end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
+end
+
+function write_12sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
+ end
+ valu_sgpr_hazard()
+end
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_bitcmp1_b32 s_size, S_WAVE_SIZE
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+7) //Number of VGPRs = (vgpr_size + 1) * 4 * 32 * 4 (non-zero value)
+ s_branch L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value)
+L_SHIFT_DONE:
+end
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
+
+function get_wave_size2(s_reg)
+ s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+ s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
+end
+
+function valu_sgpr_hazard
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
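
For reference, the size helpers above pack the save-area arithmetic into shift amounts; the 256-byte stride in the wave64 VGPR restore loop (64 lanes x 4 bytes per VGPR) and the 128-byte stride in the shared-VGPR loop (32 lanes x 4 bytes) follow the same scheme. A minimal host-side sketch of that arithmetic, with illustrative names that are not part of the driver:

#include <stdio.h>

/* Illustrative only: mirrors the shifts in get_vgpr_size_bytes() and
 * get_svgpr_size_bytes(); the inputs are the raw HW_REG_WAVE_* fields the
 * trap handler reads, nothing here is driver API.
 */
static unsigned int vgpr_save_bytes(unsigned int vgpr_size_field, int wave64)
{
	unsigned int granules = vgpr_size_field + 1;	/* field stores granules - 1 */
	unsigned int lanes = wave64 ? 64 : 32;		/* 256 or 128 bytes per VGPR row */

	/* 4 VGPRs per granule, 4 bytes per lane:
	 * wave32 -> << (2+7) == *512, wave64 -> << (2+8) == *1024
	 */
	return granules * 4 * lanes * 4;
}

static unsigned int svgpr_save_bytes(unsigned int shared_vgpr_size_field)
{
	/* 8 shared VGPRs per unit, 32 lanes, 4 bytes: << (3+7) == *1024 */
	return shared_vgpr_size_field * 8 * 32 * 4;
}

int main(void)
{
	/* e.g. field value 3 -> 16 VGPRs -> 4 KiB of save space for a wave64 wave */
	printf("%u %u\n", vgpr_save_bytes(3, 1), svgpr_save_bytes(1));
	return 0;
}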
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 0eabb7a8cab9..6869e07a2fff 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -447,7 +447,9 @@ L_SAVE:
s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
+ // Clear VSKIP state now that MODE.VSKIP has been saved.
+ // If user shader set it then vector instructions would be skipped.
+ s_setvskip 0,0
/* the first wave in the threadgroup */
 	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK	// extract first wave bit
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 065d87841459..22925df6a791 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -155,8 +154,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
/* filep now owns the reference returned by kfd_create_process */
filep->private_data = process;
- dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
- process->pasid, process->is_32bit_user_mode);
+ dev_dbg(kfd_device, "process pid %d opened kfd node, compat mode (32 bit) - %d\n",
+ process->lead_thread->pid, process->is_32bit_user_mode);
return 0;
}
@@ -212,6 +211,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
+ }
+
if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
@@ -361,8 +365,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_acquire_queue_buf;
}
- pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
- p->pasid,
+ pr_debug("Creating queue for process pid %d on gpu 0x%x\n",
+ p->lead_thread->pid,
dev->id);
err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
@@ -415,9 +419,9 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid 0x%x\n",
+ pr_debug("Destroying queue id %d for process pid %d\n",
args->queue_id,
- p->pasid);
+ p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -461,6 +465,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
+ }
+
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage & 0xFF;
@@ -468,8 +477,8 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid 0x%x\n",
- args->queue_id, p->pasid);
+ pr_debug("Updating queue id %d for process pid %d\n",
+ args->queue_id, p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -512,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!minfo.cu_mask.ptr)
- return -ENOMEM;
-
- retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
- if (retval) {
+ minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
+ if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
- retval = -EFAULT;
- goto out;
+ return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@@ -529,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
-out:
kfree(minfo.cu_mask.ptr);
return retval;
}
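
The memdup_user() call introduced above is the stock helper for this allocate-and-copy pattern: it returns an ERR_PTR-encoded -ENOMEM or -EFAULT, so one IS_ERR() check replaces the separate allocation and copy error paths. A small sketch of the calling convention it imposes; the function and buffer names here are illustrative, not from this file:

/* Sketch only: same shape as the kfd_ioctl_set_cu_mask() change above. */
static int copy_cu_mask_from_user(const void __user *uptr, size_t size, u32 **out)
{
	u32 *mask = memdup_user(uptr, size);	/* kmalloc + copy_from_user in one step */

	if (IS_ERR(mask))
		return PTR_ERR(mask);		/* -ENOMEM or -EFAULT, nothing to unwind */

	*out = mask;				/* caller kfree()s when done */
	return 0;
}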
@@ -596,7 +599,8 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
default_policy,
alternate_policy,
(void __user *)args->alternate_aperture_base,
- args->alternate_aperture_size))
+ args->alternate_aperture_size,
+ args->misc_process_flag))
err = -EINVAL;
out:
@@ -695,7 +699,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d", p->lead_thread->pid);
args->num_of_nodes = 0;
@@ -747,7 +751,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
int ret;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d",
+ p->lead_thread->pid);
if (args->num_of_nodes == 0) {
 		/* Return number of nodes, so that user space can allocate
@@ -1059,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+	/* Skip the special case that allocates VRAM without a VA;
+	 * in that case the VA is the invalid value 0.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -2027,9 +2037,7 @@ static int criu_get_process_object_info(struct kfd_process *p,
num_events = kfd_get_num_events(p);
- ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
- if (ret)
- return ret;
+ svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
*num_objects = num_queues + num_events + num_svm_ranges;
@@ -2557,8 +2565,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -2818,7 +2826,7 @@ retry:
static int runtime_disable(struct kfd_process *p)
{
- int i = 0, ret;
+ int i = 0, ret = 0;
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
@@ -2855,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p)
/* disable ttmp setup */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ int last_err = 0;
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override =
@@ -2864,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p)
pdd->dev->vm_info.last_vmid_kfd);
if (!pdd->dev->kfd->shared_resources.enable_mes)
- debug_refresh_runlist(pdd->dev->dqm);
+ last_err = debug_refresh_runlist(pdd->dev->dqm);
else
- kfd_dbg_set_mes_debug_mode(pdd,
+ last_err = kfd_dbg_set_mes_debug_mode(pdd,
!kfd_dbg_has_cwsr_workaround(pdd->dev));
+
+ if (last_err)
+ ret = last_err;
}
}
- return 0;
+ return ret;
}
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
@@ -3243,8 +3255,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3257,8 +3271,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
@@ -3365,12 +3381,12 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("pasid 0x%x mapping mmio page\n"
+ pr_debug("process pid %d mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- process->pasid, (unsigned long long) vma->vm_start,
+ process->lead_thread->pid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
return io_remap_pfn_range(vma,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 693469c18c60..4a7180b46b71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1704,6 +1704,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
/* Cacheline size not available in IP discovery for gc11.
* kfd_fill_gpu_cache_info_from_gfx_config to hard code it
*/
@@ -2132,9 +2133,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
KFD_CRAT_INTRA_SOCKET_WEIGHT;
- uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->adev, NULL, true) : mem_bw;
-
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -2143,8 +2141,16 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->weight_xgmi = weight;
- sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
- sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
+ if (ext_cpu) {
+ amdgpu_xgmi_get_bandwidth(kdev->adev, NULL,
+ AMDGPU_XGMI_BW_MODE_PER_LINK,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
+ } else {
+ sub_type_hdr->minimum_bandwidth_mbs = mem_bw;
+ sub_type_hdr->maximum_bandwidth_mbs = mem_bw;
+ }
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
@@ -2197,12 +2203,12 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
if (use_ta_info) {
sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
- sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
- peer_kdev->adev, false);
- sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+ amdgpu_xgmi_get_hops_count(kdev->adev, peer_kdev->adev);
+ amdgpu_xgmi_get_bandwidth(kdev->adev, peer_kdev->adev,
+ AMDGPU_XGMI_BW_MODE_PER_PEER,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
} else {
bool is_single_hop = kdev->kfd == peer_kdev->kfd;
int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 312dfa84f29f..ba99e0f258ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -204,11 +204,12 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
size_t exception_data_size)
{
struct kfd_process *p;
+ struct kfd_process_device *pdd = NULL;
bool signaled_to_debugger_or_runtime = false;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
- if (!p)
+ if (!pdd)
return false;
if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
@@ -238,9 +239,8 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
mutex_unlock(&p->mutex);
} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
- kfd_dqm_evict_pasid(dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(dev, p->pasid, NULL,
- exception_data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, exception_data);
signaled_to_debugger_or_runtime = true;
}
@@ -276,8 +276,8 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
data = (struct kfd_hsa_memory_exception_data *)
pdd->vm_fault_exc_data;
- kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, data);
error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
}
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
+ struct amdgpu_device *adev = pdd->dev->adev;
+ int r;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4a5a0a4e00f2..9bde2c64540f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -27,6 +27,16 @@
#include "kfd_priv.h"
static struct dentry *debugfs_root;
+static struct dentry *debugfs_proc;
+static struct list_head procs;
+
+struct debugfs_proc_entry {
+ struct list_head list;
+ struct dentry *proc_dentry;
+ pid_t pid;
+};
+
+#define MAX_DEBUGFS_FILENAME_LEN 32
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
@@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = {
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
+ debugfs_proc = debugfs_create_dir("proc", debugfs_root);
+ INIT_LIST_HEAD(&procs);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
@@ -107,5 +119,69 @@ void kfd_debugfs_init(void)
void kfd_debugfs_fini(void)
{
+ debugfs_remove_recursive(debugfs_proc);
debugfs_remove_recursive(debugfs_root);
}
+
+static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kfd_process_device *pdd = file_inode(file)->i_private;
+ char tmp[32];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid);
+
+ return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static const struct file_operations kfd_debugfs_pasid_fops = {
+ .owner = THIS_MODULE,
+ .read = kfd_debugfs_pasid_read,
+};
+
+void kfd_debugfs_add_process(struct kfd_process *p)
+{
+ int i;
+ char name[MAX_DEBUGFS_FILENAME_LEN];
+ struct debugfs_proc_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ list_add(&entry->list, &procs);
+ entry->pid = p->lead_thread->pid;
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d",
+ (int)entry->pid);
+ entry->proc_dentry = debugfs_create_dir(name, debugfs_proc);
+
+ /* Create debugfs files for each GPU:
+ * - proc/<pid>/pasid_<gpuid>
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u",
+ pdd->dev->id);
+ debugfs_create_file((const char *)name, S_IFREG | 0444,
+ entry->proc_dentry, pdd,
+ &kfd_debugfs_pasid_fops);
+ }
+}
+
+void kfd_debugfs_remove_process(struct kfd_process *p)
+{
+ struct debugfs_proc_entry *entry, *next;
+
+ mutex_lock(&kfd_processes_mutex);
+ list_for_each_entry_safe(entry, next, &procs, list) {
+ if (entry->pid != p->lead_thread->pid)
+ continue;
+
+ debugfs_remove_recursive(entry->proc_dentry);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+}
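
With the entries created by kfd_debugfs_add_process() above, the PASID for a given process/GPU pair can be read back from debugfs as plain text. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and using made-up pid and GPU-id values:

#include <stdio.h>

int main(void)
{
	char path[128];
	unsigned int pasid;
	FILE *f;

	/* Layout created by kfd_debugfs_add_process(): kfd/proc/<pid>/pasid_<gpu_id> */
	snprintf(path, sizeof(path), "/sys/kernel/debug/kfd/proc/%d/pasid_%u", 1234, 1797u);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &pasid) == 1)	/* file contents are "%u\n" */
		printf("pasid %u\n", pasid);
	fclose(f);
	return 0;
}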
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a29374c86405..e9cfb80bd436 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -101,6 +101,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
kfd->device_info.num_sdma_queues_per_engine = 8;
@@ -122,6 +123,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
/* Reserve 1 for paging and 1 for gfx */
@@ -180,6 +182,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
case IP_VERSION(12, 0, 0):
@@ -349,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = adev->rev_id >= 1 ? 90402
- : adev->flags & AMD_IS_APU ? 90400
- : 90401;
- f2g = &gc_9_4_3_kfd2kgd;
- break;
case IP_VERSION(9, 4, 4):
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
@@ -454,6 +452,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110502;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 5, 3):
+ gfx_target_version = 110503;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
case IP_VERSION(12, 0, 0):
gfx_target_version = 120000;
f2g = &gfx_v12_kfd2kgd;
@@ -493,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
+ atomic_set(&kfd->kfd_processes_count, 0);
return kfd;
}
@@ -583,9 +586,13 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x6b) ||
(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
- && mes_rev >= 68))))
+ && mes_rev >= 68) ||
+ (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
+ if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ node->adev->gds.gws_size = 64;
ret = amdgpu_amdkfd_alloc_gws(node->adev,
node->adev->gds.gws_size, &node->gws);
+ }
return ret;
}
@@ -965,7 +972,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
kfd_smi_event_update_gpu_reset(node, false, reset_context);
}
- kgd2kfd_suspend(kfd, false);
+ kgd2kfd_suspend(kfd, true);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
@@ -1007,13 +1014,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return 0;
}
-bool kfd_is_locked(void)
+bool kfd_is_locked(struct kfd_dev *kfd)
{
+ uint8_t id = 0;
+ struct kfd_node *dev;
+
lockdep_assert_held(&kfd_processes_mutex);
- return (kfd_locked > 0);
+
+ /* check reset/suspend lock */
+ if (kfd_locked > 0)
+ return true;
+
+ if (kfd)
+ return kfd->kfd_dev_lock > 0;
+
+ /* check lock on all cgroup accessible devices */
+ while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
+ if (!dev || kfd_devcgroup_check_permission(dev))
+ continue;
+
+ if (dev->kfd->kfd_dev_lock > 0)
+ return true;
+ }
+
+ return false;
}
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
struct kfd_node *node;
int i;
@@ -1021,14 +1048,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
if (!kfd->init_complete)
return;
- /* for runtime suspend, skip locking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- /* For first KFD device suspend all the KFD processes */
- if (++kfd_locked == 1)
- kfd_suspend_all_processes();
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (suspend_proc)
+ kgd2kfd_suspend_process(kfd);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
@@ -1036,7 +1057,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
}
}
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
int ret, i;
@@ -1049,14 +1070,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
- /* for runtime resume, skip unlocking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- if (--kfd_locked == 0)
- ret = kfd_resume_all_processes();
- WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (resume_proc)
+ ret = kgd2kfd_resume_process(kfd);
+
+ return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return;
+
+ mutex_lock(&kfd_processes_mutex);
+ /* For first KFD device suspend all the KFD processes */
+ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ mutex_lock(&kfd_processes_mutex);
+ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
return ret;
}
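
The kgd2kfd_suspend_process()/kgd2kfd_resume_process() pair above factors the old run_pm branches into a first-suspend/last-resume counter guarded by kfd_processes_mutex: only the first device to suspend stops all KFD processes, and only the last one to resume restarts them. A self-contained sketch of that counting pattern, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int suspend_count;	/* plays the role of kfd_locked */

static void suspend_all(void) { /* stand-in for kfd_suspend_all_processes() */ }
static void resume_all(void)  { /* stand-in for kfd_resume_all_processes() */ }

static void device_suspend(void)
{
	pthread_mutex_lock(&lock);
	if (++suspend_count == 1)	/* only the first device suspends processes */
		suspend_all();
	pthread_mutex_unlock(&lock);
}

static void device_resume(void)
{
	pthread_mutex_lock(&lock);
	if (--suspend_count == 0)	/* only the last device resumes them */
		resume_all();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	device_suspend();	/* first suspend -> suspend_all() */
	device_suspend();	/* nested suspend -> counter only */
	device_resume();
	device_resume();	/* last resume -> resume_all() */
	return 0;
}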
@@ -1091,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
}
for (i = 0; i < kfd->num_nodes; i++) {
- node = kfd->nodes[i];
+		/* Guard against a race with another thread running between
+		 * kfd_cleanup_nodes() and kfree(kfd), in which case
+		 * kfd->nodes[i] is already NULL
+ */
+ if (kfd->nodes[i])
+ node = kfd->nodes[i];
+ else
+ return;
+
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
@@ -1436,24 +1487,62 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
kfd_get_num_sdma_engines(node);
}
-int kgd2kfd_check_and_lock_kfd(void)
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
+ struct kfd_process *p;
+ int r = 0, temp, idx;
+
mutex_lock(&kfd_processes_mutex);
- if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
- return -EBUSY;
+
+ /* kfd_processes_count is per kfd_dev, return -EBUSY without
+ * further check
+ */
+ if (!!atomic_read(&kfd->kfd_processes_count)) {
+ pr_debug("process_wq_release not finished\n");
+ r = -EBUSY;
+ goto out;
+ }
+
+ if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
+ goto out;
+
+	/* fail during system reset/resume or while the kfd device is switching partitions. */
+ if (kfd_is_locked(kfd)) {
+ r = -EBUSY;
+ goto out;
}
- ++kfd_locked;
+ /*
+	 * Ensure every running process is cgroup-excluded from this device before
+	 * the mode switch, i.e. no pdd was created for it on this socket.
+ */
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->kfd != kfd)
+ continue;
+
+ r = -EBUSY;
+ goto proc_check_unlock;
+ }
+ }
+
+proc_check_unlock:
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+out:
+ if (!r)
+ ++kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
- return 0;
+ return r;
}
-void kgd2kfd_unlock_kfd(void)
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
mutex_lock(&kfd_processes_mutex);
- --kfd_locked;
+ --kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
}
@@ -1479,6 +1568,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1496,6 +1604,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1558,7 +1683,7 @@ bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entr
u32 cam_index;
if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
- p = kfd_lookup_process_by_pasid(entry->pasid);
+ p = kfd_lookup_process_by_pasid(entry->pasid, NULL);
if (!p)
return true;
@@ -1593,6 +1718,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev)
return -EINVAL;
}
+ if (dev->kfd->shared_resources.enable_mes) {
+ dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
+ return -EINVAL;
+ }
+
return dqm_debugfs_hang_hws(dev->dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 1405e8affd48..d7a2e7178ea9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,12 +36,15 @@
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
+#include "amdgpu_sdma.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+/* See unmap_queues_cpsch() */
+#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
u32 pasid, unsigned int vmid);
@@ -66,7 +69,8 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id);
-static void kfd_process_hw_exception(struct work_struct *work);
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
@@ -170,7 +174,7 @@ static void kfd_hws_hang(struct device_queue_manager *dqm)
/*
* Issue a GPU reset if HWS is unresponsive
*/
- schedule_work(&dqm->hw_exception_work);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
static int convert_to_mes_queue_type(int queue_type)
@@ -207,23 +211,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
- if (!pdd->proc_ctx_cpu_ptr) {
- r = amdgpu_amdkfd_alloc_gtt_mem(adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (r) {
- dev_err(adev->dev,
- "failed to allocate process context bo\n");
- return r;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
- queue_input.process_id = qpd->pqm->process->pasid;
+ queue_input.process_id = pdd->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
queue_input.process_va_start = 0;
queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
@@ -542,6 +531,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct device *dev = dqm->dev->adev->dev;
int allocated_vmid = -1, i;
@@ -560,9 +550,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
pr_debug("vmid allocated: %d\n", allocated_vmid);
- dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+ dqm->vmid_pasid[allocated_vmid] = pdd->pasid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
+ set_pasid_vmid_mapping(dqm, pdd->pasid, allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -814,6 +804,11 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
return -EOPNOTSUPP;
}
+	/* take the VMID for that process the safe way, via the PDD */
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd)
+ return -EFAULT;
+
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
* to check which VMID the current process is mapped to.
@@ -823,23 +818,19 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
(dev->adev, vmid, &queried_pasid);
- if (status && queried_pasid == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
- vmid, p->pasid);
+ if (status && queried_pasid == pdd->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and process pid %d\n",
+ vmid, p->lead_thread->pid);
break;
}
}
if (vmid > last_vmid_to_scan) {
- dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
+ dev_err(dev->adev->dev, "Didn't find vmid for process pid %d\n",
+ p->lead_thread->pid);
return -EFAULT;
}
- /* taking the VMID for that process on the safe way using PDD */
- pdd = kfd_get_process_device_data(dev, p);
- if (!pdd)
- return -EFAULT;
-
reg_gfx_index.bits.sh_broadcast_writes = 1;
reg_gfx_index.bits.se_broadcast_writes = 1;
reg_gfx_index.bits.instance_broadcast_writes = 1;
@@ -1075,8 +1066,8 @@ static int suspend_single_queue(struct device_queue_manager *dqm,
if (q->properties.is_suspended)
return 0;
- pr_debug("Suspending PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Suspending process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
@@ -1123,8 +1114,8 @@ static int resume_single_queue(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
- pr_debug("Restoring from suspend PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Restoring from suspend process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
q->properties.is_suspended = false;
@@ -1157,8 +1148,8 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
pdd->last_evict_timestamp = get_jiffies_64();
/* Mark all queues as evicted. Deactivate all active queues on
@@ -1215,8 +1206,17 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto out;
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
+
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
+ retval = suspend_all_queues_mes(dqm);
+ if (retval) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+ }
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1238,13 +1238,19 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
}
}
}
- pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
+ } else {
+ retval = resume_all_queues_mes(dqm);
+ if (retval)
+ dev_err(dev, "Resuming all queues failed");
+ }
out:
dqm_unlock(dqm);
@@ -1274,8 +1280,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
@@ -1358,8 +1364,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto vm_not_acquired;
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
@@ -1583,8 +1589,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) {
+ dev_warn(dev, "No more SDMA queue to allocate (%d total queues)\n",
+ get_num_sdma_queues(dqm));
return -ENOMEM;
}
@@ -1609,8 +1616,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more XGMI SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) {
+ dev_warn(dev, "No more XGMI SDMA queue to allocate (%d total queues)\n",
+ get_num_xgmi_sdma_queues(dqm));
return -ENOMEM;
}
if (restore_sdma_id) {
@@ -1669,8 +1677,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
}
if (!free_bit_found) {
- dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
- q->properties.sdma_engine_id);
+ dev_warn(dev, "No more SDMA queue to allocate for target ID %i (%d total queues)\n",
+ q->properties.sdma_engine_id, num_queues);
return -ENOMEM;
}
}
@@ -1753,15 +1761,11 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
- INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
- if (dqm->dev->kfd2kgd->get_iq_wait_times)
- dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
- &dqm->wait_times,
- ffs(dqm->dev->xcc_mask) - 1);
+ update_dqm_wait_times(dqm);
return 0;
}
@@ -1857,25 +1861,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
/* clear hang status when driver try to start the hw scheduler */
dqm->sched_running = true;
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_INIT, 0 /* unused */))
+ dev_err(dev, "Setting optimized dequeue wait failed. Using default values\n");
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
-
- /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
- if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
- (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
- uint32_t reg_offset = 0;
- uint32_t grace_period = 1;
-
- retval = pm_update_grace_period(&dqm->packet_mgr,
- grace_period);
- if (retval)
- dev_err(dev, "Setting grace timeout failed\n");
- else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
- /* Update dqm->wait_times maintained in software */
- dqm->dev->kfd2kgd->build_grace_period_packet_info(
- dqm->dev->adev, dqm->wait_times,
- grace_period, &reg_offset,
- &dqm->wait_times);
}
/* setup per-queue reset detection buffer */
@@ -1907,6 +1897,8 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ int ret = 0;
+
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
@@ -1914,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_kfd_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -1930,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
- return 0;
+ return ret;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
@@ -2101,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
- if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
+ if (amdgpu_amdkfd_is_fed(dqm->dev->adev) ||
+ amdgpu_in_reset(dqm->dev->adev))
return -EIO;
if (time_after(jiffies, end_jiffies)) {
@@ -2150,8 +2144,8 @@ static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q
{
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
- dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
- q->properties.queue_id, q->process->pasid);
+ dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid %d is reset\n",
+ q->properties.queue_id, pdd->process->lead_thread->pid);
pdd->has_reset_queue = true;
if (q->properties.is_active) {
@@ -2220,8 +2214,7 @@ static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uin
return NULL;
}
-/* only for compute queue */
-static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
+static int reset_hung_queues(struct device_queue_manager *dqm)
{
int r = 0, reset_count = 0, i;
@@ -2274,7 +2267,121 @@ reset_fail:
return r;
}
-/* dqm->lock mutex has to be locked before calling this function */
+static bool sdma_has_hang(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int i, j;
+
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ if (!dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j))
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool set_sdma_queue_as_reset(struct device_queue_manager *dqm,
+ uint32_t doorbell_off)
+{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if ((q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) &&
+ q->properties.doorbell_off == doorbell_off) {
+ set_queue_as_reset(dqm, q, qpd);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int r = 0, i, j;
+
+ if (dqm->is_hws_hang)
+ return -EIO;
+
+ /* Scan for hung HW queues and reset engine. */
+ dqm->detect_hang_count = 0;
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ uint32_t doorbell_off =
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j);
+
+ if (!doorbell_off)
+ continue;
+
+ /* Reset engine and check. */
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
+ !set_sdma_queue_as_reset(dqm, doorbell_off)) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ /* Should only expect one queue active per engine */
+ dqm->detect_hang_count++;
+ break;
+ }
+ }
+
+ /* Signal process reset */
+ if (dqm->detect_hang_count)
+ kfd_signal_reset_event(dqm->dev);
+ else
+ r = -ENOTRECOVERABLE;
+
+reset_fail:
+ dqm->detect_hang_count = 0;
+
+ return r;
+}
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
+{
+ struct amdgpu_device *adev = dqm->dev->adev;
+
+ while (halt_if_hws_hang)
+ schedule();
+
+ if (adev->debug_disable_gpu_ring_reset) {
+ dev_info_once(adev->dev,
+ "%s queue hung, but ring reset disabled",
+ is_sdma ? "sdma" : "compute");
+
+ return -EPERM;
+ }
+ if (!amdgpu_gpu_recovery)
+ return -ENOTRECOVERABLE;
+
+ return is_sdma ? reset_hung_queues_sdma(dqm) : reset_hung_queues(dqm);
+}
+
+/* dqm->lock mutex has to be locked before calling this function
+ *
+ * @grace_period: If USE_DEFAULT_GRACE_PERIOD, use the default wait time
+ * for context-switch latency. The debugger uses lower values because it
+ * triggers context switches at high frequency.
+ * This is configured by setting CP_IQ_WAIT_TIME2.SCH_WAVE.
+ *
+ */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
@@ -2293,7 +2400,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return -EIO;
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+ retval = pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_SET_SCH_WAVE, grace_period);
if (retval)
goto out;
}
@@ -2324,30 +2432,32 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
- if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
- if (reset_queues_on_hws_hang(dqm)) {
- while (halt_if_hws_hang)
- schedule();
- dqm->is_hws_hang = true;
- kfd_hws_hang(dqm);
- retval = -ETIME;
- goto out;
- }
- }
+ if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd) &&
+ reset_queues_on_hws_hang(dqm, false))
+ goto reset_fail;
+
+ /* Check for SDMA hang and attempt SDMA reset */
+ if (sdma_has_hang(dqm) && reset_queues_on_hws_hang(dqm, true))
+ goto reset_fail;
/* We need to reset the grace period value for this device */
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- if (pm_update_grace_period(&dqm->packet_mgr,
- USE_DEFAULT_GRACE_PERIOD))
+ if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_RESET, 0 /* unused */))
dev_err(dev, "Failed to reset grace period\n");
}
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
-
out:
up_read(&dqm->dev->adev->reset_domain->sem);
return retval;
+
+reset_fail:
+ dqm->is_hws_hang = true;
+ kfd_hws_hang(dqm);
+ up_read(&dqm->dev->adev->reset_domain->sem);
+ return -ETIME;
}
/* only for compute queue */
@@ -2504,20 +2614,13 @@ failed_try_destroy_debugged_queue:
return retval;
}
-/*
- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
- * stay in user mode.
- */
-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
-/* APE1 limit is inclusive and 64K aligned. */
-#define APE1_LIMIT_ALIGNMENT 0xFFFF
-
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
bool retval = true;
@@ -2526,41 +2629,17 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (alternate_aperture_size == 0) {
- /* base > limit disables APE1 */
- qpd->sh_mem_ape1_base = 1;
- qpd->sh_mem_ape1_limit = 0;
- } else {
- /*
- * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
- * SH_MEM_APE1_BASE[31:0], 0x0000 }
- * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
- * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
- * Verify that the base and size parameters can be
- * represented in this format and convert them.
- * Additionally restrict APE1 to user-mode addresses.
- */
-
- uint64_t base = (uintptr_t)alternate_aperture_base;
- uint64_t limit = base + alternate_aperture_size - 1;
-
- if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
- (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
- retval = false;
- goto out;
- }
-
- qpd->sh_mem_ape1_base = base >> 16;
- qpd->sh_mem_ape1_limit = limit >> 16;
- }
-
retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
alternate_policy,
alternate_aperture_base,
- alternate_aperture_size);
+ alternate_aperture_size,
+ misc_process_properties);
+
+ if (retval)
+ goto out;
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -2663,7 +2742,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
@@ -2987,20 +3066,19 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);
struct device_queue_manager *dqm = knode->dqm;
struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd;
struct queue *q = NULL;
int ret = 0;
- if (!p)
+ if (!pdd)
return -EINVAL;
dqm_lock(dqm);
- pdd = kfd_get_process_device_data(dqm->dev, p);
if (pdd) {
qpd = &pdd->qpd;
@@ -3033,74 +3111,21 @@ int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbel
out:
dqm_unlock(dqm);
+ kfd_unref_process(p);
return ret;
}
-static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+int kfd_evict_process_device(struct kfd_process_device *pdd)
{
- struct device *dev = dqm->dev->adev->dev;
- int ret = 0;
-
- /* Check if process is already evicted */
- dqm_lock(dqm);
- if (qpd->evicted) {
- /* Increment the evicted count to make sure the
- * process stays evicted before its terminated.
- */
- qpd->evicted++;
- dqm_unlock(dqm);
- goto out;
- }
- dqm_unlock(dqm);
-
- ret = suspend_all_queues_mes(dqm);
- if (ret) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
-
- ret = dqm->ops.evict_process_queues(dqm, qpd);
- if (ret) {
- dev_err(dev, "Evicting process queues failed");
- goto out;
- }
-
- ret = resume_all_queues_mes(dqm);
- if (ret)
- dev_err(dev, "Resuming all queues failed");
-
-out:
- return ret;
-}
+ struct device_queue_manager *dqm;
+ struct kfd_process *p;
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
-{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
- int ret = 0;
+ p = pdd->process;
+ dqm = pdd->dev->dqm;
- if (!p)
- return -EINVAL;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- pdd = kfd_get_process_device_data(dqm->dev, p);
- if (pdd) {
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
- }
-
- kfd_unref_process(p);
- return ret;
-}
-
-static void kfd_process_hw_exception(struct work_struct *work)
-{
- struct device_queue_manager *dqm = container_of(work,
- struct device_queue_manager, hw_exception_work);
- amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
+ return dqm->ops.evict_process_queues(dqm, &pdd->qpd);
}
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 09ab36f8e8c6..74a61b5b2f0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,7 +37,6 @@
#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000
-#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
struct device_process_node {
struct qcm_process_device *qpd;
@@ -174,7 +173,8 @@ struct device_queue_manager_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
int (*process_termination)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -210,7 +210,8 @@ struct device_queue_manager_asic_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void (*init_sdma_vm)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
@@ -269,7 +270,6 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
bool is_resetting;
- struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
bool sched_halt;
@@ -359,4 +359,14 @@ static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val
/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
return get_user(*val, q_rptr + 1);
}
+
+static inline void update_dqm_wait_times(struct device_queue_manager *dqm)
+{
+ if (dqm->dev->kfd2kgd->get_iq_wait_times)
+ dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
+ &dqm->wait_times,
+ ffs(dqm->dev->xcc_mask) - 1);
+}
+
+
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index d4d95c7f2e5d..0508ef5a41d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -27,12 +27,21 @@
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -80,10 +89,41 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
@@ -97,37 +137,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
-
- return true;
-}
-
-static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 245a90dfc2f6..ba6e3d747ccd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -31,10 +31,18 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10;
asic_ops->update_qpd = update_qpd_v10;
asic_ops->init_sdma_vm = init_sdma_vm_v10;
asic_ops->mqd_manager_init = mqd_manager_init_v10;
@@ -49,27 +57,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v10(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
index 2e129da7acb4..8b447d04558f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
@@ -30,10 +30,18 @@ static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11;
asic_ops->update_qpd = update_qpd_v11;
asic_ops->init_sdma_vm = init_sdma_vm_v11;
asic_ops->mqd_manager_init = mqd_manager_init_v11;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v11(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
index 4f3295b29dfb..3550da3a46f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
@@ -30,10 +30,18 @@ static int update_qpd_v12(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v12(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12;
asic_ops->update_qpd = update_qpd_v12;
asic_ops->init_sdma_vm = init_sdma_vm_v12;
asic_ops->mqd_manager_init = mqd_manager_init_v12;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v12(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 67137e674f1d..9fcc8c6e57b7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -30,10 +30,18 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9;
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
asic_ops->mqd_manager_init = mqd_manager_init_v9;
@@ -48,10 +56,42 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
+{
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ if (dqm->dev->kfd->noretry)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) {
+ if (misc_process_properties & KFD_PROC_FLAG_MFMA_HIGH_PRECISION)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRECISION_MODE__SHIFT;
+ }
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
+ return true;
+}
+
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct kfd_process_device *pdd;
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
pdd = qpd_to_pdd(qpd);
@@ -64,8 +104,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index b291ee0fab94..dad83356e976 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -27,12 +27,21 @@
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -81,10 +90,41 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_UC :
@@ -100,40 +140,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
- return true;
-}
-
-static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+static int update_qpd_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d075f24e5f9f..5a190dd6be4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -727,7 +727,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
- /*
- * If id is valid but slot is not signaled, GPU may signal the same event twice
- * before driver have chance to process the first interrupt, then signal slot is
- * auto-reset after set_event wakeup the user space, just drop the second event as
- * the application only need wakeup once.
- */
- if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
- partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
- goto out_unlock;
-
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
-out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
@@ -1139,8 +1128,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGSEGV to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -1148,13 +1137,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGTERM to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "Process %d (pasid 0x%x) got unhandled exception",
- p->lead_thread->pid, p->pasid);
+ "Process pid %d got unhandled exception",
+ p->lead_thread->pid);
}
}
@@ -1168,7 +1157,7 @@ void kfd_signal_hw_exception_event(u32 pasid)
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -1177,22 +1166,39 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_hsa_memory_exception_data exception_data;
+ int i;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.va = gpu_va;
+ exception_data.failure.NotPresent = 1;
+
+ /* Send a VM segfault event to all KFD process devices */
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+ exception_data.gpu_id = pdd->user_gpu_id;
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
+ }
+}
+
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = pdd->process;
struct kfd_hsa_memory_exception_data memory_exception_data;
int user_gpu_id;
- if (!p)
- return; /* Presumably process exited. */
-
- user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ user_gpu_id = kfd_process_get_user_gpu_id(p, pdd->dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
- WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n",
+ pdd->dev->id);
return;
}
@@ -1229,7 +1235,6 @@ void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
}
rcu_read_unlock();
- kfd_unref_process(p);
}
void kfd_signal_reset_event(struct kfd_node *dev)
@@ -1264,7 +1269,8 @@ void kfd_signal_reset_event(struct kfd_node *dev)
}
if (unlikely(!pdd)) {
- WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid);
+ WARN_ONCE(1, "Could not get device data from process pid:%d\n",
+ p->lead_thread->pid);
continue;
}
@@ -1273,12 +1279,19 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (dev->dqm->detect_hang_count) {
struct amdgpu_task_info *ti;
+ struct amdgpu_fpriv *drv_priv;
+
+ if (unlikely(amdgpu_file_to_fpriv(pdd->drm_file, &drv_priv))) {
+ WARN_ONCE(1, "Could not get vm for device %x from pid:%d\n",
+ dev->id, p->lead_thread->pid);
+ continue;
+ }
- ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid);
+ ti = amdgpu_vm_get_task_info_vm(&drv_priv->vm);
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -1311,7 +1324,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_event *ev;
@@ -1326,6 +1339,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ kfd_unref_process(p);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index dbcb60eb54b2..1d170dc50df3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -23,7 +23,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 37b69fe0ede3..3e1ad8974797 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF0_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF1_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID));
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
@@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
sq_intr_err_type);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index b3f988b275a8..2788a52714d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_warn_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ TYPE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
@@ -194,7 +219,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
@@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(dev, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(dev, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(dev, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 0cb5c582ce7d..d76fb61869c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -28,6 +28,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
/*
* GFX9 SQ Interrupts
@@ -146,7 +147,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
u64 event_id;
int old_poison, ret;
@@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
kfd_signal_poison_consumed_event(dev, pasid);
- event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
+ if (amdgpu_uniras_enabled(dev->adev))
+ event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION);
+ else
+ event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
RAS_EVENT_LOG(dev->adev, event_id,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
@@ -314,11 +319,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev,
& ~pasid_mask) | pasid);
}
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
@@ -379,28 +385,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -412,14 +472,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4b275937d05e..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
@@ -39,22 +38,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +67,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -122,15 +122,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
static void
@@ -260,49 +260,52 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long upages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ mpages++;
}
- return upages;
+ return mpages;
}
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
- for (i = j = 0; i < npages; i++) {
+ for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
+ if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ mpages++;
+ }
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
@@ -353,9 +356,12 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- while (i--) {
+ for (i = 0; i < npages && mpages; i++) {
+ if (!dst[i])
+ continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
}
@@ -379,11 +385,11 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -402,7 +408,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -441,9 +447,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- mpages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -484,7 +490,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@@ -561,8 +567,9 @@ out:
return r < 0 ? r : 0;
}
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
@@ -574,14 +581,14 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@@ -589,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -677,12 +684,11 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
@@ -704,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -730,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -743,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -758,8 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -842,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
@@ -1002,7 +1009,7 @@ out_mmput:
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
- .page_free = svm_migrate_page_free,
+ .folio_free = svm_migrate_folio_free,
.migrate_to_ram = svm_migrate_to_ram,
};
@@ -1021,7 +1028,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->flags & AMD_IS_APU)
+ if (adev->apu_prefer_gtt)
return 0;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2c..2b7fd442d29c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 2eff37aaf827..1695dd78ede8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 68dbc0399c87..3c0ae28c5923 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index 2b72d5b4949b..565858b9044d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index ff417d5361c4..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
@@ -370,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -385,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -492,6 +513,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+ /* Allow context switch so we don't cross-process starve with a massive
+ * command buffer of long-running SDMA commands
+ */
+ m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
@@ -551,7 +576,7 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
- if (amdgpu_sriov_vf(mm->dev->adev))
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
@@ -664,7 +689,9 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
-
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
+ m->cp_hqd_pq_doorbell_control |= 1 <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
m->cp_mqd_stride_size = offset;
/*
@@ -724,6 +751,9 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
+ m->cp_hqd_pq_doorbell_control |= 1 <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
update_cu_mask(mm, m, minfo, xcc);
if (q->format == KFD_QUEUE_FORMAT_AQL) {
@@ -746,6 +776,43 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
}
}
+static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *qp,
+ const void *mqd_src,
+ const void *ctl_stack_src, u32 ctl_stack_size)
+{
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
+
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
+ }
+}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
@@ -879,8 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -894,12 +959,16 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
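The v9/v10/v11/v12 MQD managers above all follow the same pattern: RPTR_BLOCK_SIZE and UNORD_DISPATCH are now programmed once in init_mqd(), and update_mqd() only clears and rewrites the QUEUE_SIZE field. A hedged sketch of the size arithmetic those hunks share (the log2-minus-one encoding is inferred from the ffs()/order_base_2() expressions, not stated in the diff):

/* Illustrative only: value OR-ed into CP_HQD_PQ_CONTROL after masking off
 * CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK, so the bits set at init time survive
 * a queue resize.
 */
static u32 hqd_queue_size_field(u32 queue_size_bytes)
{
	/* ring size in dwords, encoded as log2 minus one, e.g.
	 * 4096 bytes -> 1024 dwords -> ffs(1024) - 1 - 1 = 9
	 */
	return ffs(queue_size_bytes / sizeof(unsigned int)) - 1 - 1;
}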
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 4984b41cd372..b1a6eb349bb3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- int *over_subscription)
+ int *over_subscription,
+ int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+ if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- int *is_over_subscription)
+ int *is_over_subscription,
+ int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
- pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
+ xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
+ int xnack_enabled = -1;
+ bool xnack_conflict = 0;
rl_wptr = retval = processes_mapped = 0;
+ /* Check if processes set different xnack modes */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ if (xnack_enabled < 0)
+ /* First process */
+ xnack_enabled = qpd->pqm->process->xnack_enabled;
+ else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
+ /* Found a process with a different xnack mode */
+ xnack_conflict = 1;
+ break;
+ }
+ }
+
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
- &alloc_size_bytes, &is_over_subscription);
+ &alloc_size_bytes, &is_over_subscription,
+ xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
+build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
+ /* group processes with the same xnack mode together */
+ if (qpd->pqm->process->xnack_enabled != xnack_enabled)
+ continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
+ if (xnack_conflict) {
+ /* pick up processes with the other xnack mode */
+ xnack_enabled = !xnack_enabled;
+ xnack_conflict = 0;
+ goto build_runlist_ib;
+ }
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
- is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
- " too many processes." : "",
- is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
- " too many queues." : "",
- is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
- " multiple processes using cooperative launch." : "");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
+ " xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
@@ -396,14 +430,33 @@ out:
return retval;
}
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+/* pm_config_dequeue_wait_counts: Configure dequeue timer Wait Counts
+ * by writing to CP_IQ_WAIT_TIME2 registers.
+ *
+ * @cmd: See enum kfd_config_dequeue_wait_counts_cmd definition
+ * @value: Depends on the cmd. This parameter is unused for
+ * KFD_DEQUEUE_WAIT_INIT and KFD_DEQUEUE_WAIT_RESET. For
+ * KFD_DEQUEUE_WAIT_SET_SCH_WAVE it holds the value to be set
+ *
+ */
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t value)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
int retval = 0;
uint32_t *buffer, size;
- size = pm->pmf->set_grace_period_size;
+ if (!pm->pmf->config_dequeue_wait_counts ||
+ !pm->pmf->config_dequeue_wait_counts_size)
+ return 0;
+
+ if (cmd == KFD_DEQUEUE_WAIT_INIT && (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
+ KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0)))
+ return 0;
+
+ size = pm->pmf->config_dequeue_wait_counts_size;
mutex_lock(&pm->lock);
@@ -419,13 +472,18 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
goto out;
}
- retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
- if (!retval)
+ retval = pm->pmf->config_dequeue_wait_counts(pm, buffer,
+ cmd, value);
+ if (!retval) {
retval = kq_submit_packet(pm->priv_queue);
- else
+
+ /* If default value is modified, cache that in dqm->wait_times */
+ if (!retval && cmd == KFD_DEQUEUE_WAIT_INIT)
+ update_dqm_wait_times(pm->dqm);
+ } else {
kq_rollback_packet(pm->priv_queue);
+ }
}
-
out:
mutex_unlock(&pm->lock);
return retval;
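Callers of the old pm_update_grace_period() are converted elsewhere in this series. Purely as a hypothetical illustration of the new command-based interface (call sites and variable names assumed, not taken from this diff):

/* at scheduler start: apply the per-ASIC optimized wait counts (a no-op
 * outside the gfx9.4.1+ window checked above)
 */
r = pm_config_dequeue_wait_counts(&dqm->packet_mgr, KFD_DEQUEUE_WAIT_INIT, 0);

/* debugger requests an explicit context-switch latency */
r = pm_config_dequeue_wait_counts(&dqm->packet_mgr,
				  KFD_DEQUEUE_WAIT_SET_SCH_WAVE, grace_period);

/* restore the cached default when the debugger detaches */
r = pm_config_dequeue_wait_counts(&dqm->packet_mgr, KFD_DEQUEUE_WAIT_RESET, 0);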
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 1f9f5bfeaf86..505036968a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,11 +43,11 @@ static int pm_map_process_v9(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
- if (adev->enforce_isolation[kfd->node_id])
+ if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
@@ -102,11 +102,12 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
- if (adev->enforce_isolation[knode->node_id])
+ if (adev->enforce_isolation[knode->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
@@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
- 1 : min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
+ concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE) ?
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
@@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
+ packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
@@ -237,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
@@ -297,23 +300,79 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_set_grace_period_v9(struct packet_manager *pm,
+static inline void pm_build_dequeue_wait_counts_packet_info(struct packet_manager *pm,
+ uint32_t sch_value, uint32_t que_sleep, uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ pm->dqm->dev->kfd2kgd->build_dequeue_wait_counts_packet_info(
+ pm->dqm->dev->adev,
+ pm->dqm->wait_times,
+ sch_value,
+ que_sleep,
+ reg_offset,
+ reg_data);
+}
+
+/* pm_config_dequeue_wait_counts_v9: Builds WRITE_DATA packet with
+ * register/value for configuring dequeue wait counts
+ *
+ * @return: negative value on failure; 0 on success, with the buffer
+ * filled in with the packet
+ *
+ **/
+static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
uint32_t *buffer,
- uint32_t grace_period)
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t value)
{
struct pm4_mec_write_data_mmio *packet;
uint32_t reg_offset = 0;
uint32_t reg_data = 0;
- pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
- pm->dqm->dev->adev,
- pm->dqm->wait_times,
- grace_period,
- &reg_offset,
- &reg_data);
+ switch (cmd) {
+ case KFD_DEQUEUE_WAIT_INIT: {
+ uint32_t sch_wave = 0, que_sleep = 1;
+
+ /* For all gfx9 ASICs > gfx941,
+ * Reduce CP_IQ_WAIT_TIME2.QUE_SLEEP to 0x1 from default 0x40.
+ * On a 1GHz machine this is roughly 1 microsecond, which is
+ * about how long it takes to load data out of memory during
+ * queue connect.
+ * QUE_SLEEP: Wait Count for Dequeue Retry.
+ *
+ * Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU
+ */
+ if (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
+ KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0))
+ return -EPERM;
+
+ if (amdgpu_emu_mode == 0 && pm->dqm->dev->adev->gmc.is_app_apu &&
+ (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3)))
+ sch_wave = 1;
- if (grace_period == USE_DEFAULT_GRACE_PERIOD)
- reg_data = pm->dqm->wait_times;
+ pm_build_dequeue_wait_counts_packet_info(pm, sch_wave, que_sleep,
+ &reg_offset, &reg_data);
+
+ break;
+ }
+ case KFD_DEQUEUE_WAIT_RESET:
+ /* reg_data would be set to dqm->wait_times */
+ pm_build_dequeue_wait_counts_packet_info(pm, 0, 0, &reg_offset, &reg_data);
+ break;
+
+ case KFD_DEQUEUE_WAIT_SET_SCH_WAVE:
+ /* The CP cannot handle a value of 0, which would result in an
+ * infinite grace period being set, so clamp it to 1. This also avoids
+ * debugger API breakage, as the debugger sets 0 and expects a low value.
+ */
+ if (!value)
+ value = 1;
+ pm_build_dequeue_wait_counts_packet_info(pm, value, 0, &reg_offset, &reg_data);
+ break;
+ default:
+ pr_err("Invalid dequeue wait cmd\n");
+ return -EINVAL;
+ }
packet = (struct pm4_mec_write_data_mmio *)buffer;
memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));
@@ -415,7 +474,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -423,7 +482,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
@@ -434,7 +493,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
@@ -442,7 +501,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index c1199d06d131..a1de5d7e173a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -42,6 +42,7 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct pm4_mes_map_process *packet;
packet = (struct pm4_mes_map_process *)buffer;
@@ -52,7 +53,7 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields3.page_table_base = qpd->page_table_base;
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
@@ -303,7 +304,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
- .set_grace_period = NULL,
+ .config_dequeue_wait_counts = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -311,7 +312,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = 0,
+ .config_dequeue_wait_counts_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index cd8611401a66..e356a207d03c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
- uint32_t reserved1:5;
+ uint32_t reserved1:4;
+ uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c32b255c0eb2..70ef051511bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -32,7 +32,7 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#include <linux/kfd_ioctl.h>
+#include <uapi/linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP waits for SDMA preemption. One XCC
+ * can be associated with 2 SDMA engines. queue_preemption_timeout_ms is the time
+ * the driver waits for the CP to return the UNMAP_QUEUE fence. Thus the math is
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp workload
+ * The formula here makes the CP workload 10% of the total timeout
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
#define KFD_MAX_SDMA_QUEUES 128
@@ -289,7 +296,6 @@ struct kfd_node {
/* Global GWS resource shared between processes */
void *gws;
- bool gws_debug_workaround;
/* Clients watching SMI events */
struct list_head smi_clients;
@@ -373,6 +379,11 @@ struct kfd_dev {
/* bitmap for dynamic doorbell allocation from doorbell object */
unsigned long *doorbell_bitmap;
+
+ /* for dynamic partitioning */
+ int kfd_dev_lock;
+
+ atomic_t kfd_processes_count;
};
enum kfd_mempool {
@@ -851,6 +862,8 @@ struct kfd_process_device {
/* Tracks queue reset status */
bool has_reset_queue;
+
+ u32 pasid;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -910,8 +923,6 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- u32 pasid;
-
/*
* Array of kfd_process_device pointers,
* one for each device the process is using.
@@ -1039,7 +1050,8 @@ void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
@@ -1091,8 +1103,6 @@ struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
-bool kfd_set_pasid_limit(unsigned int new_limit);
-unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);
@@ -1142,7 +1152,6 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
-struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
uint32_t vmid)
{
@@ -1337,7 +1346,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
+int kfd_evict_process_device(struct kfd_process_device *pdd);
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
/* Process Queue Manager */
@@ -1366,8 +1375,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
-struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
- unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
@@ -1394,6 +1401,24 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
+/**
+ * enum kfd_config_dequeue_wait_counts_cmd - Command for configuring
+ * dequeue wait counts.
+ *
+ * @KFD_DEQUEUE_WAIT_INIT: Set optimized dequeue wait counts for
+ * certain ASICs. For these ASICs, this is the default value used by RESET
+ * @KFD_DEQUEUE_WAIT_RESET: Reset dequeue wait counts to the optimized value
+ * for certain ASICs. For others, reset to the default hardware reset value
+ * @KFD_DEQUEUE_WAIT_SET_SCH_WAVE: Set context switch latency wait
+ *
+ */
+enum kfd_config_dequeue_wait_counts_cmd {
+ KFD_DEQUEUE_WAIT_INIT = 1,
+ KFD_DEQUEUE_WAIT_RESET = 2,
+ KFD_DEQUEUE_WAIT_SET_SCH_WAVE = 3
+};
+
+
struct packet_manager {
struct device_queue_manager *dqm;
struct kernel_queue *priv_queue;
@@ -1419,8 +1444,8 @@ struct packet_manager_funcs {
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset);
- int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
- uint32_t grace_period);
+ int (*config_dequeue_wait_counts)(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_config_dequeue_wait_counts_cmd cmd, uint32_t value);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1431,7 +1456,7 @@ struct packet_manager_funcs {
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
- int set_grace_period_size;
+ int config_dequeue_wait_counts_size;
int query_status_size;
int release_mem_size;
};
@@ -1454,7 +1479,9 @@ int pm_send_unmap_queue(struct packet_manager *pm,
void pm_release_ib(struct packet_manager *pm);
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t wait_counts_config);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
@@ -1492,7 +1519,9 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);
+
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
@@ -1519,7 +1548,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason);
-bool kfd_is_locked(void);
+bool kfd_is_locked(struct kfd_dev *kfd);
/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
@@ -1566,10 +1595,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
+void kfd_debugfs_add_process(struct kfd_process *p);
+void kfd_debugfs_remove_process(struct kfd_process *p);
+
#else
static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
+static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
+static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}
#endif
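A worked example for the KFD_UNMAP_LATENCY_MS macro above, assuming the default queue_preemption_timeout_ms of 9000 ms (it is a module parameter, so the numbers are illustrative only):

/* (9000 - 9000 / 10) >> 1 = 8100 >> 1 = 4050 ms per SDMA engine;
 * the two engines then account for 8100 ms of the budget, leaving
 * the remaining ~900 ms (10%) for the CP workload.
 */
unsigned int unmap_latency_ms = (9000 - 9000 / 10) >> 1;	/* 4050 */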
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 0976b5b0e8e8..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
+#include "amdgpu_reset.h"
struct mm_struct;
@@ -282,8 +283,8 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
cu_cnt = 0;
proc = pdd->process;
if (pdd->qpd.queue_count == 0) {
- pr_debug("Gpu-Id: %d has no active queues for process %d\n",
- dev->id, proc->pasid);
+ pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
+ dev->id, (int)proc->lead_thread->pid);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
@@ -327,12 +328,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- if (strcmp(attr->name, "pasid") == 0) {
- struct kfd_process *p = container_of(attr, struct kfd_process,
- attr_pasid);
-
- return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
- } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ if (strcmp(attr->name, "pasid") == 0)
+ return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
+ else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
@@ -841,6 +839,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
return ERR_PTR(-EINVAL);
}
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -848,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
*/
mutex_lock(&kfd_processes_mutex);
- if (kfd_is_locked()) {
+ if (kfd_is_locked(NULL)) {
pr_debug("KFD is locked! Cannot create process");
process = ERR_PTR(-EINVAL);
goto out;
@@ -861,14 +867,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
- /* If the process just called exec(3), it is possible that the
- * cleanup of the kfd_process (following the release of the mm
- * of the old process image) is still in the cleanup work queue.
- * Make sure to drain any job before trying to recreate any
- * resource for this process.
- */
- flush_workqueue(kfd_process_wq);
-
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -902,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+ kfd_debugfs_add_process(process);
+
init_waitqueue_head(&process->wait_irq_drain);
}
out:
@@ -1056,17 +1056,15 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
- pdd->dev->id, p->pasid);
+ kfd_smi_event_process(pdd, false);
+ pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
+ pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
kfd_process_device_destroy_ib_mem(pdd);
- if (pdd->drm_file) {
- amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->adev, pdd->drm_priv);
+ if (pdd->drm_file)
fput(pdd->drm_file);
- }
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -1085,11 +1083,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1140,6 +1139,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
p->kobj = NULL;
}
+/*
+ * If any GPU is undergoing a reset, wait for the reset to complete.
+ */
+static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
+{
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -1151,8 +1161,10 @@ static void kfd_process_wq_release(struct work_struct *work)
release_work);
struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
+ /*
+ * If a GPU is in reset, user queues may still be running; wait for the reset to complete.
+ */
+ kfd_process_wait_gpu_reset_complete(p);
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
@@ -1160,9 +1172,11 @@ static void kfd_process_wq_release(struct work_struct *work)
*/
synchronize_rcu();
ef = rcu_access_pointer(p->ef);
- dma_fence_signal(ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1173,7 +1187,6 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_event_free_process(p);
- kfd_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
@@ -1209,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ /*
+ * Dequeue and destroy user queues. It is not safe for the GPU to access
+ * system memory after the mmu release notifier callback returns, because
+ * exit_mmap frees process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
@@ -1524,12 +1545,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
atomic_set(&process->debugged_process_count, 0);
sema_init(&process->runtime_enable_sema, 0);
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0) {
- err = -ENOSPC;
- goto err_alloc_pasid;
- }
-
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
@@ -1583,8 +1598,6 @@ err_init_svm_range_list:
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- kfd_pasid_free(process->pasid);
-err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
@@ -1642,6 +1655,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
+ atomic_inc(&dev->kfd->kfd_processes_count);
+
return pdd;
}
@@ -1703,15 +1718,21 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
- if (ret)
- goto err_set_pasid;
+ if (unlikely(!avm->pasid)) {
+ dev_warn(pdd->dev->adev->dev, "WARN: vm %p has no pasid associated",
+ avm);
+ ret = -EINVAL;
+ goto err_get_pasid;
+ }
+ pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
-err_set_pasid:
+err_get_pasid:
kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
@@ -1797,25 +1818,50 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
idr_remove(&pdd->alloc_idr, handle);
}
-/* This increments the process->ref counter. */
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
+static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
{
- struct kfd_process *p, *ret_p = NULL;
+ struct kfd_process_device *ret_p = NULL;
+ struct kfd_process *p;
unsigned int temp;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
+ int i;
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (p->pasid == pasid) {
- kref_get(&p->ref);
- ret_p = p;
- break;
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->pasid == pasid) {
+ ret_p = p->pdds[i];
+ break;
+ }
}
+ if (ret_p)
+ break;
+ }
+ return ret_p;
+}
+
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd)
+{
+ struct kfd_process_device *ret_p;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ ret_p = kfd_lookup_process_device_by_pasid(pasid);
+ if (ret_p) {
+ if (pdd)
+ *pdd = ret_p;
+ kref_get(&ret_p->process->ref);
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ return ret_p->process;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return ret_p;
+ if (pdd)
+ *pdd = NULL;
+
+ return NULL;
}
/* This increments the process->ref counter. */
@@ -1971,7 +2017,7 @@ static void evict_process_worker(struct work_struct *work)
*/
p = container_of(dwork, struct kfd_process, eviction_work);
- pr_debug("Started evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
/* If another thread already signaled the eviction fence,
@@ -1983,9 +2029,9 @@ static void evict_process_worker(struct work_struct *work)
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
- pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
} else
- pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to evict queues of process pid %d\n", p->lead_thread->pid);
}
static int restore_process_helper(struct kfd_process *p)
@@ -2002,9 +2048,11 @@ static int restore_process_helper(struct kfd_process *p)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Finished restoring process pid %d\n",
+ p->lead_thread->pid);
else
- pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to restore queues of process pid %d\n",
+ p->lead_thread->pid);
return ret;
}
@@ -2021,7 +2069,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Started restoring process pasid %d\n", (int)p->lead_thread->pid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -2037,8 +2085,8 @@ static void restore_process_worker(struct work_struct *work)
ret = restore_process_helper(p);
if (ret) {
- pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
- p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
+ p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
@@ -2054,7 +2102,7 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
- pr_err("Failed to suspend process 0x%x\n", p->pasid);
+ pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
signal_eviction_fence(p);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
@@ -2068,8 +2116,8 @@ int kfd_resume_all_processes(void)
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (restore_process_helper(p)) {
- pr_err("Restore process %d failed during resume\n",
- p->pasid);
+ pr_err("Restore process pid %d failed during resume\n",
+ p->lead_thread->pid);
ret = -EFAULT;
}
}
@@ -2124,7 +2172,7 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
KFD_IRQ_FENCE_CLIENTID;
- irq_drain_fence[3] = pdd->process->pasid;
+ irq_drain_fence[3] = pdd->pasid;
/*
* For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
@@ -2155,7 +2203,7 @@ void kfd_process_close_interrupt_drain(unsigned int pasid)
{
struct kfd_process *p;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
@@ -2276,8 +2324,8 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID 0x%x:\n",
- p->lead_thread->tgid, p->pasid);
+ seq_printf(m, "Process %d PASID %d:\n",
+ p->lead_thread->tgid, p->lead_thread->pid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
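With the PASID moved into kfd_process_device, handlers that match work to a process by PASID also get the owning device back from the same lookup. A minimal, hypothetical caller of the new kfd_lookup_process_by_pasid() signature (the eviction call is only an example of acting on the returned pdd):

struct kfd_process_device *pdd;
struct kfd_process *p;

p = kfd_lookup_process_by_pasid(pasid, &pdd);	/* takes a process reference */
if (!p)
	return;				/* no device of any process owns this pasid */

kfd_evict_process_device(pdd);		/* act on the GPU this pasid belongs to */

kfd_unref_process(p);			/* drop the reference taken by the lookup */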
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 9df56f8e09f9..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -69,8 +69,8 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid 0x%x\n",
- pqm->process->pasid);
+ pr_info("Cannot open more queues for process with pid %d\n",
+ pqm->process->lead_thread->pid);
return -ENOMEM;
}
@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
if (pdd->already_dequeued)
return;
-
+ /* The MES context flush needs to filter out the case in which the
+ * KFD process is created without setting up the MES context and
+ * queue for creating a compute queue.
+ */
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
- if (dev->kfd->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
@@ -276,20 +279,17 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
- if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
- if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
- pr_err("Queue memory allocated to wrong device\n");
- retval = -EINVAL;
- goto free_gang_ctx_bo;
- }
+ if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ retval = -EINVAL;
+ goto free_gang_ctx_bo;
+ }
- retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
- &(*q)->wptr_bo_gart);
- if (retval) {
- pr_err("Failed to map wptr bo to GART\n");
- goto free_gang_ctx_bo;
- }
+ retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
+ &(*q)->wptr_bo_gart);
+ if (retval) {
+ pr_err("Failed to map wptr bo to GART\n");
+ goto free_gang_ctx_bo;
}
}
@@ -297,7 +297,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
free_gang_ctx_bo:
- amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
uninit_queue(*q);
*q = NULL;
@@ -360,10 +360,26 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (retval != 0)
return retval;
+ /* Register process if this is the first queue */
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
+ if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (retval) {
+ dev_err(dev->adev->dev, "failed to allocate process context bo\n");
+ return retval;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
@@ -432,8 +448,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
- pqm->process->pasid, type, retval);
+ if ((type == KFD_QUEUE_TYPE_SDMA ||
+ type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
+ retval == -ENOMEM)
+ pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
+ else
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
@@ -527,9 +550,9 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
- pqm->process->pasid,
+ pdd->pasid,
pqn->q->properties.queue_id, retval);
- if (retval != -ETIME)
+ if (retval != -ETIME && retval != -EIO)
goto err_destroy_queue;
}
kfd_procfs_del_queue(pqn->q);
@@ -649,19 +672,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return 0;
}
-struct kernel_queue *pqm_get_kernel_queue(
- struct process_queue_manager *pqm,
- unsigned int qid)
-{
- struct process_queue_node *pqn;
-
- pqn = get_queue_by_qid(pqm, qid);
- if (pqn && pqn->kq)
- return pqn->kq;
-
- return NULL;
-}
-
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid)
{
@@ -904,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -953,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -966,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1026,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
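The CRIU layout comments above ("mqd[xcc0], mqd[xcc1], ..., ctl_stack[xcc0], ctl_stack[xcc1], ...") imply simple per-XCC offset math, which is what checkpoint_mqd_v9_4_3() and restore_mqd_v9_4_3() earlier in this patch rely on. A small sketch of that arithmetic (helper names hypothetical):

/* MQD blob for XCC i within the checkpointed data */
static void *ckpt_mqd_for_xcc(void *mqd_base, int xcc)
{
	return (uint8_t *)mqd_base + xcc * sizeof(struct v9_mqd);
}

/* control stack for XCC i; the recorded total is split evenly across XCCs */
static void *ckpt_ctl_stack_for_xcc(void *ctl_base, u32 total_size,
				    u32 num_xcc, int xcc)
{
	return (uint8_t *)ctl_base + xcc * (total_size / num_xcc);
}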
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ecccd7adbab4..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear to be twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
@@ -266,8 +276,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
- properties->eop_buf_bo->tbo.base.size,
+ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+ properties->eop_ring_buffer_size,
topo_dev->node_props.eop_buffer_size);
err = -EINVAL;
goto out_err_unreserve;
@@ -287,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -342,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
@@ -392,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
u32 vgpr_size = 0x40000;
- if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
gfxv == 90500)
@@ -452,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
- else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */
props->eop_buffer_size = 4096;
else if (gfxv >= 80000)
props->eop_buffer_size = 4096;
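A standalone sketch of the two size calculations changed in kfd_queue_acquire_buffers() above, kept outside the kernel for illustration: GFX7/8 AQL compute queues report twice their real ring size, and the CWSR area plus debug memory is scaled by the XCC count and page-aligned. The ALIGN macro mirrors the kernel's rounding; everything else is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))   /* round up to a multiple of a */

/* GFX7/8 AQL compute queues appear to be twice their actual size (see hunk above). */
uint64_t expected_ring_size(uint64_t queue_size, bool aql_compute_queue,
                            uint32_t gfx_target_version)
{
        if (aql_compute_queue &&
            gfx_target_version >= 70000 && gfx_target_version < 90000)
                return queue_size / 2;
        return queue_size;
}

/* The CWSR area (now taken from the queue properties) plus debug memory is
 * allocated once per XCC and rounded up to a whole page. */
uint64_t total_cwsr_size(uint32_t cwsr_size, uint32_t debug_mem_size,
                         uint32_t num_xcc)
{
        return ALIGN((uint64_t)(cwsr_size + debug_mem_size) * num_xcc, PAGE_SIZE);
}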
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 9b8169761ec5..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
- uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
- if (pid && client->pid != pid && !(client->suser && (events & all)))
+ if (pid && client->pid != pid && !client->suser)
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
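A small model of the privilege check simplified above: a capable client (suser) may receive events for any process, other clients only for their own pid, and in either case the event bit must be set in the client's mask. The mask macro below is assumed to match KFD_SMI_EVENT_MASK_FROM_INDEX; names are otherwise illustrative.

#include <stdbool.h>
#include <stdint.h>

#define SMI_EVENT_MASK_FROM_INDEX(i) (1ull << ((i) - 1))  /* assumed to mirror the uAPI macro */

bool smi_event_enabled(int pid, int client_pid, bool client_is_suser,
                       uint64_t client_event_mask, unsigned int event)
{
        /* Non-privileged clients only see their own process's events. */
        if (pid && client_pid != pid && !client_is_suser)
                return false;
        /* The event itself must be enabled in the client's mask. */
        return client_event_mask & SMI_EVENT_MASK_FROM_INDEX(event);
}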
@@ -254,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
pid, address, last - address + 1, node->id, trigger));
}
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
+{
+ struct amdgpu_task_info *task_info;
+ struct amdgpu_vm *avm;
+
+ if (!pdd->drm_priv)
+ return;
+
+ avm = drm_priv_to_vm(pdd->drm_priv);
+ task_info = amdgpu_vm_get_task_info_vm(avm);
+
+ if (task_info) {
+ kfd_smi_event_add(0, pdd->dev,
+ start ? KFD_SMI_EVENT_PROCESS_START :
+ KFD_SMI_EVENT_PROCESS_END,
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
+ amdgpu_vm_put_task_info(task_info);
+ }
+}
+
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 503bff13d815..bb4d72b57387 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..97c2270f278f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -563,7 +563,8 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
int r;
p = container_of(prange->svms, struct kfd_process, svms);
- pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
+ pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
+ p->lead_thread->pid, prange->svms,
prange->start, prange->last);
if (svm_range_validate_svm_bo(node, prange))
@@ -1170,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
}
static void
-svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
- struct svm_range *pchild, enum svm_work_list_ops op)
+svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
{
pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
pchild, pchild->start, pchild->last, prange, op);
- pchild->work_item.mm = mm;
+ pchild->work_item.mm = NULL;
pchild->work_item.op = op;
list_add_tail(&pchild->child_list, &prange->child_list);
}
@@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1244,8 +1244,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
+ mtype_local = AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1278,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- if (gc_ip_version < IP_VERSION(9, 5, 0))
+ if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
@@ -1286,23 +1285,13 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1312,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
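A minimal sketch of the access-flag composition after the hunk above: the readable bit is always set at the PTE level, and the writeable bit is dropped only for ranges flagged GPU read-only. The bit positions below are placeholders; the real AMDGPU_PTE_* and KFD_IOCTL_SVM_FLAG_* values live in the driver and uAPI headers.

#include <stdint.h>

/* Placeholder bit values for illustration only. */
#define PTE_READABLE        (1ull << 5)
#define PTE_WRITEABLE       (1ull << 6)
#define SVM_FLAG_GPU_RO     (1u << 3)

uint64_t apply_svm_access_flags(uint64_t pte_flags, uint32_t svm_flags)
{
        pte_flags |= PTE_READABLE;                 /* always mapped readable */
        if (!(svm_flags & SVM_FLAG_GPU_RO))
                pte_flags |= PTE_WRITEABLE;        /* writeable unless the range is GPU read-only */
        return pte_flags;
}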
@@ -1419,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1706,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1721,10 +1713,36 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permissions. If provided with PROT_NONE,
+ * unmap the memory. If it's not already mapped, this is a no-op.
+ * If PROT_WRITE is provided without READ, warn first, then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returns non-zero, we'll bail on the next for loop
+ * iteration, so just leave r and continue
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner, NULL,
- &hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (likely(range))
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
+ else
+ r = -ENOMEM;
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
pr_debug("failed %d to get svm range pages\n", r);
@@ -1735,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
@@ -1743,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized
- * Overrride return value to TRY AGAIN only if prior returns
+ * Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ amdgpu_hmm_range_free(range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
@@ -2400,15 +2421,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
prange->work_item.op != SVM_OP_UNMAP_RANGE)
prange->work_item.op = op;
} else {
- prange->work_item.op = op;
-
- /* Pairs with mmput in deferred_list_work */
- mmget(mm);
- prange->work_item.mm = mm;
- list_add_tail(&prange->deferred_list,
- &prange->svms->deferred_range_list);
- pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
- prange, prange->start, prange->last, op);
+ /* Pairs with mmput in deferred_list_work.
+ * If process is exiting and mm is gone, don't update mmu notifier.
+ */
+ if (mmget_not_zero(mm)) {
+ prange->work_item.mm = mm;
+ prange->work_item.op = op;
+ list_add_tail(&prange->deferred_list,
+ &prange->svms->deferred_range_list);
+ pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
+ prange, prange->start, prange->last, op);
+ }
}
spin_unlock(&svms->deferred_list_lock);
}
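The deferred-list change above queues work only when mmget_not_zero() succeeds, i.e. the mm still has users. Below is a standalone model of that "take a reference only if not already zero" pattern, written with C11 atomics rather than the kernel's refcount helpers; it mirrors the semantics in spirit only.

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *users unless it is already zero; returns true if a reference
 * was taken, false if the object is already on its way out. */
bool get_not_zero(atomic_uint *users)
{
        unsigned int old = atomic_load(users);

        while (old != 0) {
                /* On failure, old is reloaded with the current value. */
                if (atomic_compare_exchange_weak(users, &old, old + 1))
                        return true;
        }
        return false;
}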
@@ -2422,8 +2445,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
}
static void
-svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
- struct svm_range *prange, unsigned long start,
+svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
unsigned long last)
{
struct svm_range *head;
@@ -2444,12 +2466,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
svm_range_split(tail, last + 1, tail->last, &head);
if (head != prange && tail != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
} else if (tail != prange) {
- svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
} else if (head != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
} else if (parent != prange) {
prange->work_item.op = SVM_OP_UNMAP_RANGE;
}
@@ -2526,14 +2548,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
l = min(last, pchild->last);
if (l >= s)
svm_range_unmap_from_gpus(pchild, s, l, trigger);
- svm_range_unmap_split(mm, prange, pchild, start, last);
+ svm_range_unmap_split(prange, pchild, start, last);
mutex_unlock(&pchild->lock);
}
s = max(start, prange->start);
l = min(last, prange->last);
if (l >= s)
svm_range_unmap_from_gpus(prange, s, l, trigger);
- svm_range_unmap_split(mm, prange, prange, start, last);
+ svm_range_unmap_split(prange, prange, start, last);
if (unmap_parent)
svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
@@ -2576,8 +2598,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
if (range->event == MMU_NOTIFY_RELEASE)
return true;
- if (!mmget_not_zero(mni->mm))
- return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
@@ -2604,7 +2624,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
}
svm_range_unlock(prange);
- mmput(mni->mm);
return true;
}
@@ -2691,7 +2710,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (node->adev->flags & AMD_IS_APU)
+ if (node->adev->apu_prefer_gtt)
return 0;
if (prange->preferred_loc == gpuid ||
@@ -2979,7 +2998,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
return -EFAULT;
}
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return 0;
@@ -3008,19 +3027,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
- /* check if this page fault time stamp is before svms->checkpoint_ts */
- if (svms->checkpoint_ts[gpuidx] != 0) {
- if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
- pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
- r = 0;
- goto out;
- } else
- /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts
- * to zero to avoid following ts wrap around give wrong comparing
- */
- svms->checkpoint_ts[gpuidx] = 0;
- }
-
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
r = -EFAULT;
@@ -3040,6 +3046,23 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
+
+ /* check if this page fault time stamp is before svms->checkpoint_ts */
+ if (svms->checkpoint_ts[gpuidx] != 0) {
+ if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ if (write_locked)
+ mmap_write_downgrade(mm);
+ r = -EAGAIN;
+ goto out_unlock_svms;
+ } else {
+ /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts
+ * to zero so a later ts wrap-around cannot give a wrong comparison
+ */
+ svms->checkpoint_ts[gpuidx] = 0;
+ }
+ }
+
prange = svm_range_from_addr(svms, addr, NULL);
if (!prange) {
pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
@@ -3165,7 +3188,8 @@ out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
- svm_range_count_fault(node, p, gpuidx);
+ if (r != -EAGAIN)
+ svm_range_count_fault(node, p, gpuidx);
mmput(mm);
out:
@@ -3242,7 +3266,8 @@ void svm_range_list_fini(struct kfd_process *p)
struct svm_range *prange;
struct svm_range *next;
- pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
+ &p->svms);
cancel_delayed_work_sync(&p->svms.restore_work);
@@ -3265,7 +3290,8 @@ void svm_range_list_fini(struct kfd_process *p)
mutex_destroy(&p->svms.lock);
- pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p done\n",
+ p->lead_thread->pid, &p->svms);
}
int svm_range_list_init(struct kfd_process *p)
@@ -3438,7 +3464,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
- if (bo_node->adev->flags & AMD_IS_APU) {
+ if (bo_node->adev->apu_prefer_gtt) {
best_loc = 0;
goto out;
}
@@ -3628,8 +3654,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
bool flush_tlb;
int r, ret = 0;
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
- p->pasid, &p->svms, start, start + size - 1, size);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, size);
r = svm_range_check_attr(p, nattr, attrs);
if (r)
@@ -3667,6 +3693,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
@@ -3737,8 +3765,8 @@ out_unlock_range:
out:
mutex_unlock(&process_info->lock);
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
- &p->svms, start, start + size - 1, r);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, r);
return ret ? ret : r;
}
@@ -4076,8 +4104,8 @@ exit:
return ret;
}
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
uint64_t total_size, accessibility_size, common_attr_size;
int nattr_common = 4, nattr_accessibility = 1;
@@ -4089,8 +4117,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
*svm_priv_data_size = 0;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mutex_lock(&svms->lock);
list_for_each_entry(prange, &svms->list, list) {
@@ -4132,7 +4158,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
*svm_priv_data_size);
- return 0;
}
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -4149,8 +4174,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p,
struct mm_struct *mm;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mm = get_task_mm(p->lead_thread);
if (!mm) {
@@ -4248,7 +4271,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
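A standalone sketch of the retry-fault drain check that the kfd_svm.c hunks above move under svms->lock: faults stamped at or before the per-GPU checkpoint are dropped and retried with -EAGAIN, and the first newer fault clears the checkpoint so a later timestamp wrap-around cannot make stale faults look current. A plain 64-bit compare stands in for the wrap-safe amdgpu_ih_ts_after_or_equal() helper.

#include <stdbool.h>
#include <stdint.h>

/* Returns true if the fault should be dropped (still draining). */
bool drain_retry_fault(uint64_t *checkpoint_ts, uint64_t fault_ts)
{
        if (*checkpoint_ts == 0)
                return false;                    /* nothing being drained */
        if (fault_ts <= *checkpoint_ts)
                return true;                     /* stale fault, drop it */
        *checkpoint_ts = 0;                      /* drained: disarm before ts can wrap */
        return false;
}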
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index bddd24f04669..a63dfc95b602 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"
@@ -184,8 +183,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size);
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_offset);
@@ -202,7 +201,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
- ((adev)->flags & AMD_IS_APU))
+ ((adev)->apu_prefer_gtt))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
@@ -237,13 +236,12 @@ static inline int svm_range_schedule_evict_svm_bo(
return -EINVAL;
}
-static inline int svm_range_get_info(struct kfd_process *p,
- uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+static inline void svm_range_get_info(struct kfd_process *p,
+ uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
*num_svm_ranges = 0;
*svm_priv_data_size = 0;
- return 0;
}
static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ceb9fb475ef1..811636af14ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -108,24 +108,6 @@ struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
return top_dev->gpu;
}
-struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
-{
- struct kfd_topology_device *top_dev;
- struct kfd_node *device = NULL;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {
- device = top_dev->gpu;
- break;
- }
-
- up_read(&topology_lock);
-
- return device;
-}
-
/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
@@ -528,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+ (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
@@ -537,11 +523,17 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, offs, "capability2",
+ dev->node_props.capability2);
sysfs_show_64bit_prop(buffer, offs, "debug_prop",
dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp &&
+ (dev->gpu->xcp->xcp_mgr->mode !=
+ AMDGPU_SPX_PARTITION_MODE) ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
@@ -968,24 +960,23 @@ static void kfd_update_system_properties(void)
up_read(&topology_lock);
}
-static void find_system_memory(const struct dmi_header *dm,
- void *private)
+static void find_system_memory(const struct dmi_header *dm, void *private)
{
+ struct dmi_mem_device *memdev = container_of(dm, struct dmi_mem_device, header);
struct kfd_mem_properties *mem;
- u16 mem_width, mem_clock;
struct kfd_topology_device *kdev =
(struct kfd_topology_device *)private;
- const u8 *dmi_data = (const u8 *)(dm + 1);
-
- if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
- mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
- mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
- list_for_each_entry(mem, &kdev->mem_props, list) {
- if (mem_width != 0xFFFF && mem_width != 0)
- mem->width = mem_width;
- if (mem_clock != 0)
- mem->mem_clk_max = mem_clock;
- }
+
+ if (memdev->header.type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ if (memdev->header.length < sizeof(struct dmi_mem_device))
+ return;
+
+ list_for_each_entry(mem, &kdev->mem_props, list) {
+ if (memdev->total_width != 0xFFFF && memdev->total_width != 0)
+ mem->width = memdev->total_width;
+ if (memdev->speed != 0)
+ mem->mem_clk_max = memdev->speed;
}
}
@@ -1284,34 +1275,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
{
struct kfd_node *gpu = outbound_link->gpu;
struct amdgpu_device *adev = gpu->adev;
- int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu);
+ unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu);
+ uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1;
+ uint32_t xgmi_sdma_eng_id_mask =
+ ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
+
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
- kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
- (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+ num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) &&
+ num_xgmi_nodes == 8);
if (support_rec_eng) {
int src_socket_id = adev->gmc.xgmi.physical_node_id;
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+ unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
outbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift);
inbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
- } else {
- int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
- int i, eng_offset = 0;
+ 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift);
- if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
- eng_offset = num_sdma_eng;
- num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
- }
+ /* If recommended engine is out of range, need to reset the mask */
+ if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
+ if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
- for (i = 0; i < num_sdma_eng; i++) {
- outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- }
+ } else {
+ uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
+ sdma_eng_id_mask;
+
+ outbound_link->rec_sdma_eng_id_mask = engine_mask;
+ inbound_link->rec_sdma_eng_id_mask = engine_mask;
}
}
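The recommended-engine rework above builds its masks directly from the engine counts; a standalone restatement of that bit layout, where plain SDMA engines occupy the low bits and XGMI SDMA engines sit immediately above them. When no specific engine can be recommended, the whole XGMI (or plain) mask is used as the fallback, as in the else branch of the hunk.

#include <stdint.h>

uint32_t sdma_eng_id_mask(unsigned int num_sdma_engines)
{
        return (1u << num_sdma_engines) - 1;
}

uint32_t xgmi_sdma_eng_id_mask(unsigned int num_sdma_engines,
                               unsigned int num_xgmi_sdma_engines)
{
        return ((1u << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
}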
@@ -1593,7 +1591,8 @@ static int kfd_dev_create_p2p_links(void)
break;
if (!dev->gpu || !dev->gpu->adev ||
(dev->gpu->kfd->hive_id &&
- dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
+ dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id &&
+ amdgpu_xgmi_get_is_sharing_enabled(dev->gpu->adev, new_dev->gpu->adev)))
goto next;
/* check if node(s) is/are peer accessible in one direction or bi-direction */
@@ -1683,17 +1682,32 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
- unsigned int cu_sibling_map_mask;
+ unsigned int cu_sibling_map_mask = 0;
int first_active_cu;
int i, j, k, xcc, start, end;
int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL;
enum amdgpu_memory_partition mode;
struct amdgpu_device *adev = knode->adev;
+ bool found = false;
start = ffs(knode->xcc_mask) - 1;
end = start + num_xcc;
- cu_sibling_map_mask = cu_info->bitmap[start][0][0];
+
+ /* To find the bitmap of the first active cu in the first
+ * xcc, this relies on the assumption that every xcc must
+ * have at least one active cu.
+ */
+ for (i = 0; i < gfx_info->max_shader_engines && !found; i++) {
+ for (j = 0; j < gfx_info->max_sh_per_se && !found; j++) {
+ if (cu_info->bitmap[start][i % 4][j % 4]) {
+ cu_sibling_map_mask =
+ cu_info->bitmap[start][i % 4][j % 4];
+ found = true;
+ }
+ }
+ }
+
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
@@ -2000,15 +2014,13 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
- dev->node_props.capability |=
- HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(12, 0, 0))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 155b5c410af1..3de8ec0043bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -24,6 +24,7 @@
#ifndef __KFD_TOPOLOGY_H__
#define __KFD_TOPOLOGY_H__
+#include <linux/dmi.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/kfd_sysfs.h>
@@ -50,6 +51,7 @@ struct kfd_node_properties {
uint32_t cpu_core_id_base;
uint32_t simd_id_base;
uint32_t capability;
+ uint32_t capability2;
uint64_t debug_prop;
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
@@ -179,6 +181,22 @@ struct kfd_system_properties {
struct attribute attr_props;
};
+struct dmi_mem_device {
+ struct dmi_header header;
+ u16 physical_handle;
+ u16 error_handle;
+ u16 total_width;
+ u16 data_width;
+ u16 size;
+ u8 form_factor;
+ u8 device_set;
+ u8 device_locator;
+ u8 bank_locator;
+ u8 memory_type;
+ u16 type_detail;
+ u16 speed;
+} __packed;
+
struct kfd_topology_device *kfd_create_topology_device(
struct list_head *device_list);
void kfd_release_topology_device_list(struct list_head *device_list);
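The find_system_memory() rewrite above decodes the SMBIOS Type 17 record through the dmi_mem_device struct added to kfd_topology.h instead of raw byte offsets. Below is a standalone sketch of that layout and the container_of step, with the macro expanded locally for illustration; field order mirrors the struct in the hunk.

#include <stddef.h>
#include <stdint.h>

struct dmi_header {
        uint8_t  type;
        uint8_t  length;
        uint16_t handle;
} __attribute__((packed));

/* Layout mirrors the struct added to kfd_topology.h (SMBIOS Type 17). */
struct dmi_mem_device {
        struct dmi_header header;
        uint16_t physical_handle;
        uint16_t error_handle;
        uint16_t total_width;
        uint16_t data_width;
        uint16_t size;
        uint8_t  form_factor;
        uint8_t  device_set;
        uint8_t  device_locator;
        uint8_t  bank_locator;
        uint8_t  memory_type;
        uint16_t type_detail;
        uint16_t speed;
} __attribute__((packed));

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* The DMI walker hands out a header pointer; recover the full record and
 * read named fields instead of the old magic offsets 0x6 and 0x11. */
uint16_t mem_speed(struct dmi_header *dm)
{
        struct dmi_mem_device *memdev =
                container_of(dm, struct dmi_mem_device, header);

        return memdev->speed;
}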
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index faed84172dd4..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -45,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -72,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -88,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
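A userspace model of the slot management the amdgpu_xcp_drv.c changes introduce: allocation now takes the first free slot rather than always appending, and the new free path matches by pointer, so partition devices can be created and destroyed out of order. The mutex guarding the real arrays is omitted here and the names are illustrative.

#include <stddef.h>

#define MAX_XCP_DEVICE 64   /* stand-in for MAX_XCP_PLATFORM_DEVICE */

static void *xcp_slot[MAX_XCP_DEVICE];
static int xcp_count;

/* Returns the slot index used, or -1 if the table is full. */
int xcp_slot_alloc(void *dev)
{
        int i;

        if (xcp_count >= MAX_XCP_DEVICE)
                return -1;

        for (i = 0; i < MAX_XCP_DEVICE; i++) {
                if (!xcp_slot[i]) {
                        xcp_slot[i] = dev;
                        xcp_count++;
                        return i;
                }
        }
        return -1;
}

/* Free by pointer, mirroring amdgpu_xcp_drm_dev_free(). */
void xcp_slot_free(void *dev)
{
        int i;

        for (i = 0; i < MAX_XCP_DEVICE; i++) {
                if (xcp_slot[i] == dev) {
                        xcp_slot[i] = NULL;
                        xcp_count--;
                        break;
                }
        }
}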
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 11e3f2f3b174..abd3b6564373 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,6 +8,8 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
depends on BROKEN || !CC_IS_CLANG || ARM64 || LOONGARCH || RISCV || SPARC64 || X86_64
+ select CEC_CORE
+ select CEC_NOTIFIER
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || LOONGARCH || RISCV))
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..8e949fe77312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,7 +38,9 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
- amdgpu_dm_wb.o
+ amdgpu_dm_quirks.o \
+ amdgpu_dm_wb.o \
+ amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b631b2eba0f0..740711ac1037 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -80,9 +78,9 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -98,22 +96,16 @@
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <media/cec-notifier.h>
#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -154,6 +146,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);
+
#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
@@ -178,6 +173,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service, bool oem);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -236,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -244,6 +242,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -270,7 +272,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -291,7 +293,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -315,7 +317,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
-static bool dm_is_idle(void *handle)
+static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return true;
@@ -362,6 +364,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -414,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -528,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -565,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -663,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -739,6 +796,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
+
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -762,18 +842,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -790,11 +870,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -806,7 +886,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -825,7 +905,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -861,7 +941,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -875,6 +955,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -892,22 +996,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -917,7 +1012,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -925,25 +1020,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -1001,10 +1096,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1169,13 +1264,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1185,19 +1280,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1266,6 +1361,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
@@ -1277,7 +1373,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1285,14 +1383,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1303,11 +1401,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1350,18 +1448,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1471,18 +1569,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1551,8 +1649,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1568,7 +1667,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1617,77 +1716,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
-{
- const struct dmi_system_id *dmi_id;
-
- dm->aux_hpd_discon_quirk = false;
-
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
- dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1774,10 +1802,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1790,6 +1819,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1797,7 +1827,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
@@ -1831,26 +1861,7 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
- /*
- * Other ASICs with DCN35 that have residency issues with
- * IPS2 in idle.
- * We want them to use IPS2 only in display off cases.
- */
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- break;
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(3, 5, 1):
ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
break;
@@ -1882,7 +1893,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1967,7 +1978,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_device_seamless_boot_supported(adev)) {
init_data.flags.seamless_boot_edp_requested = true;
init_data.flags.allow_seamless_boot_optimization = true;
- DRM_INFO("Seamless boot condition check passed\n");
+ drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
}
init_data.flags.enable_mipi_converter_optimization = true;
@@ -1989,11 +2000,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+	/* DCN35 and above support dynamic DTBCLK switch */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2004,10 +2021,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2033,30 +2050,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
+ adev->dm.dc->debug.force_disable_subvp = true;
+ adev->dm.dc->debug.fams2_config.bits.enable = false;
+ }
+
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
+ if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2071,10 +2102,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
- "amdgpu: failed to initialize freesync_module.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2083,7 +2114,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2094,9 +2125,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2104,20 +2135,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2134,8 +2174,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2149,18 +2189,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize vblank for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+ adev->dm.secure_display_ctx.support_mul_roi = true;
+
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2327,12 +2371,13 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
return 0;
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2350,7 +2395,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2409,6 +2454,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2452,6 +2498,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
+ case IP_VERSION(3, 6, 0):
+ dmub_asic = DMUB_ASIC_DCN36;
+ break;
case IP_VERSION(4, 0, 1):
dmub_asic = DMUB_ASIC_DCN401;
break;
@@ -2472,7 +2521,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2481,7 +2530,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2494,7 +2543,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2519,7 +2568,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2548,14 +2597,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2572,7 +2621,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -2750,6 +2799,48 @@ out_fail:
mutex_unlock(&mgr->lock);
}
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_set_phys_addr(n,
+ connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(ddev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (suspend)
+ hdmi_cec_unset_edid(aconnector);
+ else
+ hdmi_cec_set_edid(aconnector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
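The two helpers above only forward state to an already-registered CEC notifier: hdmi_cec_set_edid() publishes the CEC physical address that DRM parsed out of the EDID, and hdmi_cec_unset_edid() invalidates it on disconnect or suspend. A condensed, illustrative sketch of how the pair is used around a hotplug cycle (the notifier registration itself is assumed to happen elsewhere in connector setup and is not part of this hunk):

/* Illustrative only, condensed from the detect/suspend paths in this patch. */
static void demo_cec_sync(struct amdgpu_dm_connector *aconnector, bool connected)
{
	if (connected)
		hdmi_cec_set_edid(aconnector);   /* publish EDID physical address */
	else
		hdmi_cec_unset_edid(aconnector); /* invalidate on unplug/suspend */
}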
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
@@ -2836,13 +2927,40 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
return 0;
}
+static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_i2c_adapter *oem_i2c;
+ struct ddc_service *oem_ddc_service;
+ int r;
+
+ oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
+ if (oem_ddc_service) {
+ oem_i2c = create_i2c(oem_ddc_service, true);
+ if (!oem_i2c) {
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
+ if (r) {
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
+ kfree(oem_i2c);
+ return r;
+ }
+ dm->oem_i2c = oem_i2c;
+ }
+
+ return 0;
+}
+
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@@ -2874,6 +2992,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
return r;
amdgpu_dm_hpd_init(adev);
+ r = dm_oem_i2c_hw_init(adev);
+ if (r)
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
+
return 0;
}
@@ -2913,33 +3035,40 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
}
+DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
+
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
- struct dc_state *context = NULL;
- enum dc_status res = DC_ERROR_UNEXPECTED;
+ struct dc_state *context __free(state_release) = NULL;
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
@@ -2949,7 +3078,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
context = dc_state_create_current_copy(dc);
if (context == NULL)
- goto context_alloc_fail;
+ return DC_ERROR_UNEXPECTED;
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
@@ -2960,25 +3089,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
- }
+ enum dc_status res;
+
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
+ return DC_FAIL_DETACH_SURFACES;
res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
- goto fail;
+ return res;
}
params.streams = context->streams;
params.stream_count = context->stream_count;
- res = dc_commit_streams(dc, &params);
-fail:
- dc_state_release(context);
-
-context_alloc_fail:
- return res;
+ return dc_commit_streams(dc, &params);
}
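The rewrite of amdgpu_dm_commit_zero_streams() above leans on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() binds a cleanup expression to a name, and __free(name) runs that expression when the annotated variable goes out of scope, which is what lets the early returns drop the explicit dc_state_release() calls and the old goto labels. A minimal, self-contained sketch of the same pattern, using a hypothetical buffer type that is not part of this patch:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct demo_buf { int len; };

/* Free the buffer automatically when it leaves scope (NULL is a no-op). */
DEFINE_FREE(demo_buf_free, struct demo_buf *, if (_T) kfree(_T))

static int demo_use_buf(void)
{
	struct demo_buf *b __free(demo_buf_free) = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return -ENOMEM;		/* nothing allocated, nothing to free */

	if (b->len < 0)
		return -EINVAL;		/* b is freed automatically here */

	return 0;			/* ...and on the normal path as well */
}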
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
@@ -2991,13 +3115,78 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_cache_state(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
+
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+}
+
static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
if (amdgpu_in_reset(adev)) {
+ enum dc_status res;
+
mutex_lock(&dm->dc_lock);
dc_allow_idle_optimizations(adev->dm.dc, false);
@@ -3007,19 +3196,27 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (dm->cached_dc_state)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
- amdgpu_dm_commit_zero_streams(dm->dc);
+ res = amdgpu_dm_commit_zero_streams(dm->dc);
+ if (res != DC_OK) {
+ drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
+ return -EINVAL;
+ }
amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
- return ret;
+ return 0;
}
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ if (!adev->dm.cached_state) {
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
+ }
+
+ s3_handle_hdmi_cec(adev_to_drm(adev), true);
s3_handle_mst(adev_to_drm(adev), true);
@@ -3148,36 +3345,112 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *bundle;
+ } *bundle __free(kfree);
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
if (!bundle) {
drm_err(dm->ddev, "Failed to allocate update bundle\n");
- goto cleanup;
+ return;
}
for (k = 0; k < dc_state->stream_count; k++) {
bundle->stream_update.stream = dc_state->streams[k];
- for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
bundle->surface_updates[m].surface =
- dc_state->stream_status->plane_states[m];
+ dc_state->stream_status[k].plane_states[m];
bundle->surface_updates[m].surface->force_full_update =
true;
}
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
- dc_state->stream_status->plane_count,
+ dc_state->stream_status[k].plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
}
+}
-cleanup:
- kfree(bundle);
+static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
+ struct dc_sink *sink)
+{
+ struct dc_panel_patch *ppatch = NULL;
+
+ if (!sink)
+ return;
+
+ ppatch = &sink->edid_caps.panel_patch;
+ if (ppatch->wait_after_dpcd_poweroff_ms) {
+ msleep(ppatch->wait_after_dpcd_poweroff_ms);
+ drm_dbg_driver(adev_to_drm(adev),
+ "%s: adding a %ds delay as w/a for panel\n",
+ __func__,
+ ppatch->wait_after_dpcd_poweroff_ms / 1000);
+ }
+}
+
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
}
static int dm_resume(struct amdgpu_ip_block *ip_block)
@@ -3188,12 +3461,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3225,8 +3492,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ if (r) {
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3264,6 +3533,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
@@ -3293,12 +3568,15 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
*/
amdgpu_dm_irq_resume_early(adev);
+ s3_handle_hdmi_cec(ddev, false);
+
/* On resume we need to rewrite the MSTM control bits to enable MST*/
s3_handle_mst(ddev, false);
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool ret;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3315,17 +3593,20 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
if (aconnector->mst_root)
continue;
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
} else {
- mutex_lock(&dm->dc_lock);
+ guard(mutex)(&dm->dc_lock);
dc_exit_ips_for_hw_access(dm->dc);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
- mutex_unlock(&dm->dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
+ if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ }
}
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
@@ -3335,48 +3616,15 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
- mutex_unlock(&aconnector->hpd_lock);
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3386,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -3452,23 +3713,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
- int min_input_signal_override;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3491,25 +3754,44 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
luminance_range = &conn_base->display_info.luminance_range;
- if (luminance_range->max_luminance) {
- caps->aux_min_input_signal = luminance_range->min_luminance;
+ if (luminance_range->max_luminance)
caps->aux_max_input_signal = luminance_range->max_luminance;
- } else {
- caps->aux_min_input_signal = 0;
+ else
caps->aux_max_input_signal = 512;
- }
- min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
- if (min_input_signal_override >= 0)
- caps->min_input_signal = min_input_signal_override;
+ if (luminance_range->min_luminance)
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ else
+ caps->aux_min_input_signal = 1;
+
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
+ }
}
+DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
+
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_sink *sink __free(sink_release) = NULL;
struct drm_device *dev = connector->dev;
- struct dc_sink *sink;
/* MST handled by drm_mst framework */
if (aconnector->mst_mgr.mst_state == true)
@@ -3531,7 +3813,7 @@ void amdgpu_dm_update_connector_after_detect(
* For S3 resume with headless use eml_sink to fake stream
* because on resume connector->sink is set to NULL
*/
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
if (sink) {
if (aconnector->dc_sink) {
@@ -3556,10 +3838,6 @@ void amdgpu_dm_update_connector_after_detect(
}
}
- mutex_unlock(&dev->mode_config.mutex);
-
- if (sink)
- dc_sink_release(sink);
return;
}
@@ -3567,10 +3845,8 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_sink_release(sink);
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
return;
- }
if (aconnector->dc_sink == sink) {
/*
@@ -3579,15 +3855,15 @@ void amdgpu_dm_update_connector_after_detect(
*/
drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
- if (sink)
- dc_sink_release(sink);
return;
}
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- mutex_lock(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3607,6 +3883,7 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->drm_edid = NULL;
+ hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
@@ -3616,6 +3893,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
drm_edid_connector_update(connector, aconnector->drm_edid);
+ hdmi_cec_set_edid(aconnector);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
connector->display_info.source_physical_address);
@@ -3632,6 +3910,7 @@ void amdgpu_dm_update_connector_after_detect(
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
+ hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->num_modes = 0;
@@ -3646,12 +3925,102 @@ void amdgpu_dm_update_connector_after_detect(
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
}
- mutex_unlock(&dev->mode_config.mutex);
-
update_subconnector_property(aconnector);
- if (sink)
- dc_sink_release(sink);
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+ return true;
+}
+
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (HPD stays low for longer than the debounce delay) and a
+ * spontaneous RX HPD toggle (HPD is restored before the delay expires).
+ *
+ * If the toggle resolves within this delay, the driver compares the old and
+ * new sink capabilities and only generates a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
}
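For the debounce path above to run, the delayed work and the per-connector delay must be initialized when the connector is created; that setup is outside this hunk. A sketch of what the initialization is expected to look like, with the 1500 ms default taken from the DOC comment (the exact init site is an assumption, not shown in this diff):

	/* In the amdgpu_dm_connector init path (assumed, not part of this hunk): */
	INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
	aconnector->hdmi_hpd_debounce_delay_ms = 1500;	/* default from the DOC above */
	aconnector->hdmi_prev_sink = NULL;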
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3663,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3671,7 +4041,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
*/
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
@@ -3683,7 +4053,15 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
aconnector->timing_changed = false;
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3694,12 +4072,41 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+		 * processing immediately. This lets spurious HPD toggles be
+		 * distinguished from genuine physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
- mutex_lock(&adev->dm.dc_lock);
- dc_exit_ips_for_hw_access(dc);
- ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
- mutex_unlock(&adev->dm.dc_lock);
+
+ /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ dc_exit_ips_for_hw_access(dc);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
@@ -3710,8 +4117,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
}
}
- mutex_unlock(&aconnector->hpd_lock);
-
}
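handle_hpd_irq_helper() now relies on guard(mutex)() and scoped_guard() from <linux/cleanup.h> rather than explicit lock/unlock pairs: the mutex is released automatically when the enclosing scope (or the scoped_guard block) ends, including on every early return such as the pending-debounce bail-out above. A small illustrative sketch of the idiom, using a hypothetical lock and counter that are not part of this driver:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

static int demo_bump(bool skip)
{
	guard(mutex)(&demo_lock);	/* held until the function returns */

	if (skip)
		return -EAGAIN;		/* unlocked automatically */

	return ++demo_count;		/* unlocked automatically */
}

static void demo_peek(int *out)
{
	scoped_guard(mutex, &demo_lock) {
		*out = demo_count;	/* lock held only inside this block */
	}
}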
static void handle_hpd_irq(void *param)
@@ -3722,20 +4127,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
@@ -3777,7 +4183,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3799,7 +4205,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3816,7 +4222,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3826,7 +4232,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -3889,19 +4295,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -3922,7 +4328,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -3940,7 +4346,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -3982,7 +4388,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -3993,7 +4399,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4012,7 +4418,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4023,7 +4429,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4041,7 +4447,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4083,7 +4489,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4094,7 +4500,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4112,7 +4518,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4123,7 +4529,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4142,7 +4548,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4153,7 +4559,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4171,7 +4577,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4221,7 +4627,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4232,7 +4638,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4253,7 +4659,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4264,7 +4670,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4292,7 +4698,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4303,7 +4709,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4323,7 +4729,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4334,7 +4740,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4352,7 +4758,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4374,7 +4780,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4551,48 +4957,40 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
-#if defined(CONFIG_ACPI)
- struct amdgpu_dm_backlight_caps caps;
-
- memset(&caps, 0, sizeof(caps));
+ struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
- if (dm->backlight_caps[bl_idx].caps_valid)
+ if (caps->caps_valid)
return;
- amdgpu_acpi_get_backlight_caps(&caps);
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_get_backlight_caps(caps);
/* validate the firmware value is sane */
- if (caps.caps_valid) {
- int spread = caps.max_input_signal - caps.min_input_signal;
+ if (caps->caps_valid) {
+ int spread = caps->max_input_signal - caps->min_input_signal;
- if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
- caps.min_input_signal < 0 ||
+ if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps->min_input_signal < 0 ||
spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
spread < AMDGPU_DM_MIN_SPREAD) {
DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
- caps.min_input_signal, caps.max_input_signal);
- caps.caps_valid = false;
+ caps->min_input_signal, caps->max_input_signal);
+ caps->caps_valid = false;
}
}
- if (caps.caps_valid) {
- dm->backlight_caps[bl_idx].caps_valid = true;
- if (caps.aux_support)
- return;
- dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
- dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
- } else {
- dm->backlight_caps[bl_idx].min_input_signal =
- AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal =
- AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ if (!caps->caps_valid) {
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
}
#else
- if (dm->backlight_caps[bl_idx].aux_support)
+ if (caps->aux_support)
return;
- dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
#endif
}
@@ -4614,6 +5012,82 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
+{
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
+
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
+
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
+{
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
+
+ if (!caps->data_points)
+ return;
+
+ /*
+ * Handle the case where brightness is below the first data point:
+ * interpolate between (0, 0) and (first_signal, first_lum).
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
+
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
+ }
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* clamp index to the last data point */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
+}
+
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
@@ -4622,9 +5096,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
if (!get_brightness_range(caps, &min, &max))
return brightness;
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ convert_custom_brightness(caps, min, max, &brightness);
+
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4637,8 +5112,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
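
A minimal userspace sketch of the backlight flow above, with invented curve points, BL_MAX and min/max range values; only the rounding and the binary-search/interpolation shape follows the patch:

#include <stdio.h>

#define BL_MAX 255			/* stand-in for AMDGPU_MAX_BL_LEVEL */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

struct lum_point { unsigned int input_signal, luminance; };

/* invented ACPI-style luminance curve: input signal -> relative luminance (%) */
static const struct lum_point curve[] = {
	{ 10, 2 }, { 60, 18 }, { 128, 45 }, { 200, 78 }, { 255, 100 },
};

static unsigned int interpolate_lum(unsigned int brightness)
{
	int left = 0, right = (int)(sizeof(curve) / sizeof(curve[0])) - 1;

	/* below the first data point: interpolate from (0, 0) */
	if (brightness < curve[0].input_signal)
		return DIV_ROUND_CLOSEST(curve[0].luminance * brightness,
					 curve[0].input_signal);

	/* binary search for the bracketing pair of data points */
	while (left <= right) {
		int mid = left + (right - left) / 2;

		if (curve[mid].input_signal == brightness)
			return curve[mid].luminance;
		if (curve[mid].input_signal < brightness)
			left = mid + 1;
		else
			right = mid - 1;
	}

	/* left/right now bracket the value: linear interpolation */
	return curve[right].luminance +
	       DIV_ROUND_CLOSEST((curve[left].luminance - curve[right].luminance) *
				 (brightness - curve[right].input_signal),
				 curve[left].input_signal - curve[right].input_signal);
}

int main(void)
{
	unsigned int min = 12, max = 250;	/* invented panel range */

	for (unsigned int user = 0; user <= max - min; user += 50) {
		/* rescale the user value to firmware range, then look up luminance */
		unsigned int fw = DIV_ROUND_CLOSEST(user * BL_MAX, max - min);

		printf("user=%3u  fw=%3u  lum=%3u%%\n", user, fw, interpolate_lum(fw));
	}
	return 0;
}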
@@ -4646,21 +5121,40 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
- struct amdgpu_dm_backlight_caps caps;
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector->bl_idx != bl_idx)
+ continue;
+
+ /* if connector is off, save the brightness for next time it's on */
+ if (!aconnector->base.encoder) {
+ dm->brightness[bl_idx] = user_brightness;
+ dm->actual_brightness[bl_idx] = 0;
+ return;
+ }
+ }
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- caps = dm->backlight_caps[bl_idx];
+ caps = &dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
- brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
@@ -4668,7 +5162,15 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
- if (caps.aux_support) {
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
+ if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc)
@@ -4721,10 +5223,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4763,8 +5263,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
+ int min, max;
if (aconnector->bl_idx == -1)
return;
@@ -4776,16 +5277,24 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = caps.ac_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = caps.dc_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
+ /* min is zero, so max needs to be adjusted */
+ props.max_brightness = max - min;
+ drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
+ drm_info(drm, "Using custom brightness curve\n");
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -4794,12 +5303,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -4813,7 +5323,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -4831,7 +5341,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -4846,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -4861,11 +5372,17 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer the ABM property on non-OLED panels, and only when the user didn't turn it off via module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -4901,14 +5418,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -4921,7 +5438,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -4953,14 +5470,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -4977,9 +5494,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5000,6 +5518,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
psr_feature_enabled = true;
break;
@@ -5017,6 +5536,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
replay_feature_enabled = true;
break;
@@ -5027,7 +5547,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5043,12 +5563,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5068,12 +5588,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5082,7 +5602,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5104,13 +5624,26 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5119,7 +5652,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5141,7 +5674,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5166,14 +5699,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5190,7 +5724,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -5308,6 +5843,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
+ case IP_VERSION(3, 6, 0):
+ fw_name_dmub = FIRMWARE_DCN_36_DMUB;
+ break;
case IP_VERSION(4, 0, 1):
fw_name_dmub = FIRMWARE_DCN_401_DMUB;
break;
@@ -5331,7 +5869,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5436,13 +5974,14 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5491,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
+ /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
+ if (plane_state->state && plane_state->state->plane_color_pipeline)
+ return 0;
+
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
@@ -5514,9 +6057,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
case DRM_COLOR_YCBCR_BT2020:
if (full_range)
- *color_space = COLOR_SPACE_2020_YCBCR;
+ *color_space = COLOR_SPACE_2020_YCBCR_FULL;
else
- return -EINVAL;
+ *color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
default:
@@ -5591,7 +6134,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -6012,12 +6555,14 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
else
- color_space = COLOR_SPACE_2020_YCBCR;
+ color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
default:
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
color_space = COLOR_SPACE_SRGB;
+ if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
/*
* 27030khz is the separation point between HDTV and SDTV
* according to HDMI spec, we use YCbCr709 and YCbCr601
@@ -6110,6 +6655,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6129,6 +6675,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6156,9 +6706,19 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6271,19 +6831,19 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
const struct drm_display_mode *native_mode,
bool scale_enabled)
{
- if (scale_enabled) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
- } else if (native_mode->clock == drm_mode->clock &&
- native_mode->htotal == drm_mode->htotal &&
- native_mode->vtotal == drm_mode->vtotal) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ if (scale_enabled || (
+ native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal)) {
+ if (native_mode->crtc_clock)
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
} else {
/* no scaling nor amdgpu inserted, no need to patch */
}
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6293,7 +6853,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6426,7 +6986,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6601,7 +7161,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6621,7 +7181,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6649,6 +7209,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6671,11 +7232,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6690,7 +7246,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6702,7 +7258,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6734,7 +7290,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -6830,29 +7386,117 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display
+ * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link)
+ * @force: if true, force polling even when DAC load detection was used
+ *
+ * Used for connectors that don't support HPD (hotplug detection) to
+ * periodically check whether the connector is connected to a display.
+ *
+ * When connection was determined via DAC load detection, we avoid
+ * re-running it on normal polls to prevent visible glitches, unless
+ * @force is set.
+ *
+ * Return: The probed connector status (connected/disconnected/unknown).
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When we determined the connection using DAC load detection,
+ * do NOT poll the connector to detect disconnect because
+ * that would run DAC load detection again, which can cause
+ * visible glitches.
+ *
+ * Only allow such a connector to be polled again when forcing.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only call full link detection when a sink isn't created yet,
+ * i.e. just when the display is plugged in, otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activity.
+ *
+ * @connector: The DRM connector we are checking. We convert it to
+ * amdgpu_dm_connector so we can read the DC link and state.
+ * @force: If true, do a full detect again. This is used even when
+ * a lighter check would normally be used to avoid flicker.
+ *
+ * Return: The connector status (connected, disconnected, or unknown).
+ *
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+ /* Only poll analog connectors, and only when they are either
+ * disconnected or connected to an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
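
A condensed sketch of the detect decision order introduced above, using stand-in types and field names; the actual poll result is stubbed here rather than running DAC load detection:

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };
enum force { FORCE_UNSPECIFIED, FORCE_OFF, FORCE_ON };

struct conn {
	enum force force;
	bool in_poll_worker;	/* drm_kms_helper_is_poll_worker() */
	bool supports_analog;	/* dc_connector_supports_analog() */
	bool has_sink;		/* aconnector->dc_sink != NULL */
	bool sink_is_analog;	/* dc_sink->edid_caps.analog */
};

static enum status detect(const struct conn *c)
{
	/* forced states win over everything else */
	if (c->force == FORCE_ON)
		return CONNECTED;
	if (c->force == FORCE_OFF)
		return DISCONNECTED;

	/* analog-capable links fall back to polling when not forced */
	if (c->in_poll_worker && c->supports_analog &&
	    (!c->has_sink || c->sink_is_analog))
		return c->has_sink ? CONNECTED : DISCONNECTED; /* poll result stubbed */

	/* HPD-capable links: connected exactly when a sink exists */
	return c->has_sink ? CONNECTED : DISCONNECTED;
}

int main(void)
{
	struct conn vga = { FORCE_UNSPECIFIED, true, true, false, false };

	printf("vga detect -> %s\n",
	       detect(&vga) == CONNECTED ? "connected" : "disconnected");
	return 0;
}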
@@ -6903,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -6945,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
@@ -6997,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
@@ -7048,6 +7719,7 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+ cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -7064,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -7079,10 +7758,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7182,11 +7857,18 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
- drm_edid = drm_edid_read(connector);
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7228,17 +7910,24 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
- drm_edid = drm_edid_read(connector);
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7330,7 +8019,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7343,12 +8032,12 @@ cleanup:
}
struct dc_stream_state *
-create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream)
{
- struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dc_stream_state *stream;
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
@@ -7359,23 +8048,29 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!dm_state)
return NULL;
- if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector &&
+ (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
- if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (!aconnector) /* writeback connector */
return stream;
- dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -7383,7 +8078,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7398,28 +8093,57 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
- stream = create_validate_stream_for_sink(aconnector, drm_mode,
- dm_state, old_stream);
+ switch (dc_result) {
+ /*
+ * If we failed to validate DP bandwidth stream with the requested RGB color depth,
+ * we try to fallback and configure in order:
+ * YUV422 (8bpc, 6bpc)
+ * YUV420 (8bpc, 6bpc)
+ */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ /* recursively entered and YUV422 failed, try YUV420 */
+ } else if (!aconnector->force_yuv420_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
+ stream = create_validate_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
}
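
The fallback order the switch above implements via recursion, sketched iteratively with made-up bandwidth numbers (RGB first, then YUV422, then YUV420; each attempt also steps the requested bpc down inside the validate call in the real code):

#include <stdbool.h>
#include <stdio.h>

enum encoding { ENC_RGB, ENC_YUV422, ENC_YUV420 };

/* stand-in for create_stream_for_sink() + dc_validate_stream() */
static bool try_validate(enum encoding enc, int available_bw)
{
	static const int cost[] = { 30, 20, 15 };	/* made-up bandwidth cost */

	return cost[enc] <= available_bw;
}

int main(void)
{
	static const enum encoding order[] = { ENC_RGB, ENC_YUV422, ENC_YUV420 };
	static const char *const names[] = { "RGB", "YUV422", "YUV420" };
	int bw = 18;	/* pretend the link only fits YUV420 */

	for (unsigned int i = 0; i < 3; i++) {
		if (try_validate(order[i], bw)) {
			printf("validated with %s\n", names[i]);
			return 0;
		}
		printf("%s failed, falling back\n", names[i]);
	}
	printf("no encoding fits\n");
	return 1;
}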
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
+ struct drm_display_mode *test_mode;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
+ /* we always have an amdgpu_dm_connector here since we got
+ * here via the amdgpu_dm_connector_helper_funcs
+ */
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
@@ -7438,15 +8162,20 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
- drm_mode_set_crtcinfo(mode, 0);
+ test_mode = drm_mode_duplicate(connector->dev, mode);
+ if (!test_mode)
+ goto fail;
- stream = create_validate_stream_for_sink(aconnector, mode,
+ drm_mode_set_crtcinfo(test_mode, 0);
+
+ stream = create_validate_stream_for_sink(connector, test_mode,
to_dm_connector_state(connector->state),
NULL);
+ drm_mode_destroy(connector->dev, test_mode);
if (stream) {
dc_stream_release(stream);
result = MODE_OK;
@@ -7529,6 +8258,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
@@ -7540,6 +8272,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7641,6 +8381,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_ASPECT;
+ }
+ return 0;
+ }
+
if (!aconnector->mst_output_port)
return 0;
@@ -7654,7 +8411,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -7761,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -7777,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -7828,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -7850,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -7860,23 +8639,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
n = ARRAY_SIZE(common_modes);
@@ -8073,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
+ return;
+
+ if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
+ if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog)
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8082,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct drm_encoder *encoder;
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
- struct dc_link_settings *verified_link_cap =
- &amdgpu_dm_connector->dc_link->verified_link_cap;
- const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+ struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
+ const struct dc *dc = dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
@@ -8096,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog &&
+ dc_connector_supports_analog(dc_link->link_id.id)) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8143,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
* configure support HPD hot plug connector_>polled default value is 0
* which means HPD hot plug not supported
@@ -8164,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
@@ -8172,6 +8968,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
dm->ddev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA
+ || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root))
+ drm_connector_attach_broadcast_rgb_property(&aconnector->base);
+
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -8213,6 +9013,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8224,7 +9036,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ if (!ddc_service->ddc_pin)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
@@ -8243,11 +9055,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
cmd.payloads[i].data = msgs[i].buf;
}
- if (dc_submit_i2c(
- ddc_service->ctx->dc,
- ddc_service->link->link_index,
- &cmd))
- result = num;
+ if (i2c->oem) {
+ if (dc_submit_i2c_oem(
+ ddc_service->ctx->dc,
+ &cmd))
+ result = num;
+ } else {
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+ }
kfree(cmd.payloads);
return result;
@@ -8264,9 +9083,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
};
static struct amdgpu_i2c_adapter *
-create_i2c(struct ddc_service *ddc_service,
- int link_index,
- int *res)
+create_i2c(struct ddc_service *ddc_service, bool oem)
{
struct amdgpu_device *adev = ddc_service->ctx->driver_context;
struct amdgpu_i2c_adapter *i2c;
@@ -8277,13 +9094,39 @@ create_i2c(struct ddc_service *ddc_service,
i2c->base.owner = THIS_MODULE;
i2c->base.dev.parent = &adev->pdev->dev;
i2c->base.algo = &amdgpu_dm_i2c_algo;
- snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ if (oem)
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus");
+ else
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d",
+ ddc_service->link->link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
+ i2c->oem = oem;
return i2c;
}
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_connector_info conn_info;
+ struct drm_device *ddev = aconnector->base.dev;
+ struct device *hdmi_dev = ddev->dev;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
+ drm_info(ddev, "HDMI-CEC feature masked\n");
+ return -EINVAL;
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
+ aconnector->notifier =
+ cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
+ if (!aconnector->notifier) {
+ drm_err(ddev, "Failed to create cec notifier\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
/*
* Note: this function assumes that dc_link_detect() was called for the
@@ -8304,21 +9147,21 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- i2c = create_i2c(link->ddc, link->link_index, &res);
+ i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -8328,7 +9171,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -8347,6 +9190,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ amdgpu_dm_initialize_hdmi_connector(aconnector);
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
@@ -8405,30 +9252,54 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
- /*
- * We have no guarantee that the frontend index maps to the same
+{
+ /*
+ * We cannot be sure that the frontend index maps to the same
* backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
+ * So we have to go through the CRTC to find the right IRQ.
*/
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
if (acrtc_state) {
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
- IP_VERSION(3, 5, 0) ||
- acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED ||
- !(adev->flags & AMD_IS_APU)) {
- timing = &acrtc_state->stream->timing;
-
- /* at least 2 frames */
+ timing = &acrtc_state->stream->timing;
+
+ /*
+ * Depending on when the HW latching event of double-buffered
+ * registers happen relative to the PSR SDP deadline, and how
+ * bad the Panel clock has drifted since the last ALPM off
+ * event, there can be up to 3 frames of delay between sending
+ * the PSR exit cmd to DMUB fw, and when the panel starts
+ * displaying live frames.
+ *
+ * We can set:
+ *
+ * 20/100 * offdelay_ms = 3_frames_ms
+ * => offdelay_ms = 5 * 3_frames_ms
+ *
+ * This ensures that `3_frames_ms` will only be experienced as a
+ * 20% delay on top how long the display has been static, and
+ * thus make the delay less perceivable.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version <
+ DC_PSR_VERSION_UNSUPPORTED) {
+ offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
+ timing->v_total *
+ timing->h_total,
+ timing->pix_clk_100hz);
+ config.offdelay_ms = offdelay ?: 30;
+ } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ IP_VERSION(3, 5, 0) ||
+ !(adev->flags & AMD_IS_APU)) {
+ /*
+ * Older HW and DGPU have issues with instant off;
+ * use a 2 frame offdelay.
+ */
offdelay = DIV64_U64_ROUND_UP((u64)20 *
timing->v_total *
timing->h_total,
@@ -8436,33 +9307,42 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
config.offdelay_ms = offdelay ?: 30;
} else {
+ /* offdelay_ms = 0 will never disable vblank */
+ config.offdelay_ms = 1;
config.disable_immediate = true;
}
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -8818,7 +9698,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -8826,7 +9706,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -8913,13 +9793,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -8933,6 +9813,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8959,14 +9840,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9137,7 +10019,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9168,7 +10050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9303,11 +10185,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -9684,20 +10566,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -9768,69 +10650,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
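+/*
+ * Walk the connector states in this atomic commit and, for each affected
+ * display, ask the HDCP workqueue to enable or disable encryption based on
+ * the requested content protection state.
+ */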
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -9844,7 +10697,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -9852,29 +10705,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -9918,7 +10755,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -9926,6 +10763,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
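+/*
+ * Commit-setup hook: set up the DP MST payload allocation, then recompute
+ * CRTC color management state for any active CRTC whose color properties
+ * changed or that needs a modeset.
+ */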
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -9936,7 +10845,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
- bool abm_changed, hdr_changed, scaling_changed;
+ bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false;
memset(&stream_update, 0, sizeof(stream_update));
@@ -9955,13 +10864,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
scaling_changed = is_scaling_state_different(dm_new_con_state,
dm_old_con_state);
+ if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) &&
+ (dm_old_crtc_state->stream->output_color_space !=
+ get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state)))
+ output_color_space_changed = true;
+
abm_changed = dm_new_crtc_state->abm_level !=
dm_old_crtc_state->abm_level;
hdr_changed =
!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
- if (!scaling_changed && !abm_changed && !hdr_changed)
+ if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed)
continue;
stream_update.stream = dm_new_crtc_state->stream;
@@ -9973,6 +10887,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
stream_update.dst = dm_new_crtc_state->stream->dst;
}
+ if (output_color_space_changed) {
+ dm_new_crtc_state->stream->output_color_space
+ = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state);
+
+ stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space;
+ }
+
if (abm_changed) {
dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
@@ -9996,9 +10917,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10016,6 +10937,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10066,14 +10989,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.window_param.update_win = true;
+ uint8_t cnt;
- /**
- * It takes 2 frames for HW to stably generate CRC when
- * resuming from suspend, so we set skip_frame_cnt 2.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
+ if (acrtc->dm_irq_params.window_param[cnt].enable) {
+ acrtc->dm_irq_params.window_param[cnt].update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+					 * resuming from suspend, so we set skip_frame_cnt to 2.
+ */
+ acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
+ }
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -10201,16 +11130,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10218,9 +11151,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10228,7 +11163,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
@@ -10312,7 +11247,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10358,6 +11293,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -10428,6 +11365,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10457,8 +11395,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10468,7 +11406,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_validate_stream_for_sink(aconnector,
+ new_stream = create_validate_stream_for_sink(connector,
&new_crtc_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -10481,7 +11419,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10519,7 +11457,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10557,7 +11495,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10578,7 +11516,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -10674,7 +11612,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -10711,6 +11649,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
state->allow_modeset)
return true;
+ if (amdgpu_in_reset(adev) && state->allow_modeset)
+ return true;
+
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
@@ -11161,8 +12102,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11416,6 +12357,30 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
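+/*
+ * Return true when any plane on the given CRTC switches its framebuffer
+ * between memory types (e.g. VRAM <-> GTT) in this commit; such a change
+ * rules out async flips.
+ */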
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
+ return false;
+ }
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11613,10 +12578,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11854,7 +12815,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
@@ -11883,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -11894,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
@@ -11911,9 +12872,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
@@ -11975,7 +12938,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -11983,7 +12946,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -11995,7 +12958,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12211,7 +13174,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12233,7 +13196,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
@@ -12245,10 +13208,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
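+		/*
+		 * Seamless refresh rate changes require the DP sink to ignore
+		 * MSA timing parameters (DPCD MSA_TIMING_PAR_IGNORED), which
+		 * DC reports as allow_invalid_MSA_timing_param.
+		 */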
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+ }
+
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
@@ -12392,7 +13359,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12402,31 +13369,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+	/* a write request may also receive a byte indicating how many bytes were partially written */
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
@@ -12437,6 +13397,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
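+/*
+ * Submit a list of fused I/O commands to DMUB and wait for the matching
+ * asynchronous reply. The reply completion is expected to be signalled from
+ * the DMUB notification path (not part of this hunk); replies are matched on
+ * request.identifier, and the request is aborted if no matching reply arrives
+ * within timeout_us.
+ */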
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
+
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12455,7 +13488,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
@@ -12475,3 +13508,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
{
return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
+
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params)
+{
+ // Not yet implemented
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index e46e1365fe91..ef97cede9926 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -50,7 +51,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -58,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -81,6 +83,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -152,6 +155,32 @@ struct idle_workqueue {
};
/**
+ * struct vupdate_offload_work - Work data for offloading task from vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
+#define MAX_LUMINANCE_DATA_POINTS 99
+
+/**
+ * struct amdgpu_dm_luminance_data - Custom luminance data
+ * @luminance: Luminance in percent
+ * @input_signal: Input signal in range 0-255
+ */
+struct amdgpu_dm_luminance_data {
+ u8 luminance;
+ u8 input_signal;
+} __packed;
+
+/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
* Describe the backlight support for ACPI or eDP AUX.
@@ -188,6 +217,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+ * @brightness_mask: After deriving brightness, OR it with this mask.
+ * Workaround for panels with issues with certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
@@ -195,6 +229,14 @@ struct amdgpu_dm_backlight_caps {
* @dc_level: the default brightness if booted on DC
*/
u8 dc_level;
+ /**
+ * @data_points: the number of custom luminance data points
+ */
+ u8 data_points;
+ /**
+ * @luminance_data: custom luminance data
+ */
+ struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS];
};
/**
@@ -256,6 +298,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -594,6 +640,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+ * quirk for platforms that put edp0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -604,8 +657,26 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+	 * Data is stored as a byte array that should be cast to the appropriate bb struct
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
+
+ /**
+ * @oem_i2c:
+ *
+ * OEM i2c bus
+ */
+ struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -671,6 +742,8 @@ struct amdgpu_dm_connector {
uint32_t connector_id;
int bl_idx;
+ struct cec_notifier *notifier;
+
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
const struct drm_edid *drm_edid;
@@ -697,8 +770,13 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -722,6 +800,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -741,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -915,6 +999,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
@@ -945,7 +1030,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
int link_index);
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
@@ -969,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
@@ -981,11 +1068,19 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
struct dc_stream_state *
- create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream);
@@ -1010,4 +1105,10 @@ void dm_free_gpu_mem(struct amdgpu_device *adev,
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..1dcc79b35225 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,13 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
*
+ * We have three types of color management in the AMD display driver.
+ * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
+ * 2. AMD driver private color management on &drm_plane and &drm_crtc
+ * 3. AMD plane color pipeline
+ *
+ * The CRTC properties are the original color management. When they were
+ * implemented per-plane color management was not a thing yet. Because
+ * of that we could get away with plumbing the DEGAMMA and CTM
+ * properties to pre-blending HW functions. This is incompatible with
+ * per-plane color management, such as via the AMD private properties or
+ * the new drm_plane color pipeline. The only compatible CRTC property
+ * with per-plane color management is the GAMMA property as it is
+ * applied post-blending.
+ *
+ * The AMD driver private color management properties are only exposed
+ * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
+ * are temporary building blocks on the path to full-fledged &drm_plane
+ * and &drm_crtc color pipelines and lay the driver's groundwork for the
+ * color pipelines.
+ *
+ * The AMD plane color pipeline describes AMD's &drm_colorops via the
+ * &drm_plane's COLOR_PIPELINE property.
+ *
+ * drm_crtc Properties
+ * -------------------
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
- * But these aren't a direct mapping to DRM color properties. The current DRM
- * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
- * is essentially giving:
+ * But these aren't a direct mapping to DRM color properties. The
+ * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
+ * while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
- * The input gamma LUT block isn't really applicable here since it operates
- * on the actual input data itself rather than the HW fp representation. The
- * input and output CSC blocks are technically available to use as part of
- * the DC interface but are typically used internally by DC for conversions
- * between color spaces. These could be blended together with user
- * adjustments in the future but for now these should remain untouched.
+ * The input gamma LUT block isn't really applicable here since it
+ * operates on the actual input data itself rather than the HW fp
+ * representation. The input and output CSC blocks are technically
+ * available to use as part of the DC interface but are typically used
+ * internally by DC for conversions between color spaces. These could be
+ * blended together with user adjustments in the future but for now
+ * these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't
+ * actually support any CRTC props with correct blending with multiple
+ * planes - but we can still support CRTC color management properties in
+ * DM in most single plane cases correctly with clever management of the
+ * DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when
+ * their respective property is set to NULL. A linear DGM/RGM LUT should
+ * also be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the
+ * default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
+ * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ *
+ * AMD Private Color Management on drm_plane
+ * -----------------------------------------
+ *
+ * The AMD private color management properties on a &drm_plane are:
+ *
+ * - AMD_PLANE_DEGAMMA_LUT
+ * - AMD_PLANE_DEGAMMA_LUT_SIZE
+ * - AMD_PLANE_DEGAMMA_TF
+ * - AMD_PLANE_HDR_MULT
+ * - AMD_PLANE_CTM
+ * - AMD_PLANE_SHAPER_LUT
+ * - AMD_PLANE_SHAPER_LUT_SIZE
+ * - AMD_PLANE_SHAPER_TF
+ * - AMD_PLANE_LUT3D
+ * - AMD_PLANE_LUT3D_SIZE
+ * - AMD_PLANE_BLEND_LUT
+ * - AMD_PLANE_BLEND_LUT_SIZE
+ * - AMD_PLANE_BLEND_TF
+ *
+ * The AMD private color management property on a &drm_crtc is:
*
- * The pipe blending also happens after these blocks so we don't actually
- * support any CRTC props with correct blending with multiple planes - but we
- * can still support CRTC color management properties in DM in most single
- * plane cases correctly with clever management of the DC interface in DM.
+ * - AMD_CRTC_REGAMMA_TF
*
- * As per DRM documentation, blocks should be in hardware bypass when their
- * respective property is set to NULL. A linear DGM/RGM LUT should also
- * considered as putting the respective block into bypass mode.
+ * Use of these properties is discouraged.
*
- * This means that the following
- * configuration is assumed to be the default:
+ * AMD plane color pipeline
+ * ------------------------
+ *
+ * The AMD &drm_plane color pipeline is advertised for DCN generations
+ * 3.0 and newer. It exposes these elements in this order:
+ *
+ * 1. 1D curve colorop
+ * 2. Multiplier
+ * 3. 3x4 CTM
+ * 4. 1D curve colorop
+ * 5. 1D LUT
+ * 6. 3D LUT
+ * 7. 1D curve colorop
+ * 8. 1D LUT
+ *
+ * The multiplier (#2) is a simple multiplier that is applied to all
+ * channels.
+ *
+ * The 3x4 CTM (#3) is a simple 3x4 matrix.
+ *
+ * #1 and #7 are non-linear to linear curves. #4 is a linear to
+ * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or
+ * their inverse.
+ *
+ * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs.
+ *
+ * The 3DLUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
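+ *
+ * As an illustration only (the API does not mandate any particular use),
+ * a compositor that wants to blend an sRGB-encoded plane in linear light
+ * could program #1 as the sRGB EOTF and leave every other element in
+ * bypass, so that blending operates on linear values.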
*
- * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
- * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
}
/**
+ * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut32 *
+__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut32_size(blob) : 0;
+ return blob ? (struct drm_color_lut32 *)blob->data : NULL;
+}
+
+/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
* @size: lut size
@@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
/**
+ * __drm_lut32_to_dc_gamma - convert a drm_color_lut32 to dc_gamma.
+ * @lut: DRM 32-bit lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ *
+ * Converts each of the MAX_COLOR_LUT_ENTRIES 32-bit LUT entries into DC
+ * fixed-point gamma entries.
+ */
+static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
+{
+ int i;
+
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
+ }
+}
+
+/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
* @matrix: DC CSC float matrix
@@ -566,12 +683,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+/**
+ * __set_output_tf_32 - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_output_tf_32(struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ cal_buffer.buffer_index = -1;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * Color module doesn't like calculating regamma params
+ * on top of a linear input. But degamma params can be used
+ * instead to simulate this.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, gamma != NULL);
+ } else {
+ /*
+ * Assume sRGB. The actual mapping will depend on whether the
+ * input was legacy or not.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
+ has_rom, NULL, &cal_buffer);
+ }
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
+
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -639,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_input_tf_32 - calculates the input transfer function based on expected
+ * input space.
+ * @caps: dc color capabilities
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size)
+{
+ struct dc_gamma *gamma = NULL;
+ bool res;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -668,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
+static enum dc_transfer_func_predefined
+amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
+{
+ switch (tf) {
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ case DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case DRM_COLOROP_1D_CURVE_GAMMA22:
+ case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ return TRANSFER_FUNCTION_GAMMA22;
+ default:
+ return TRANSFER_FUNCTION_LINEAR;
+ }
+}
+
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -721,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
+static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
+ const struct drm_color_lut32 lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+		 * The 3D LUT RGB values are distributed across four arrays
+		 * lut0-3, where the first holds 1229 entries and the others
+		 * 1228 each. The bit depth supported for the 3dlut channel is
+		 * 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve color pipeline API to enable the userspace set
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
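+		 *
+		 * For the 17x17x17 case that is 17^3 = 4913 entries, split
+		 * round-robin across lut0-3: 1229 + 3 * 1228 = 4913.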
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+}
+
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -821,7 +1104,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
@@ -885,33 +1168,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only check the color state without updating the dc stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function verifies the CRTC LUT sizes, checks whether there is enough
+ * space for the output transfer function and whether its parameters can be
+ * calculated by the AMD color module. It also adjusts some settings for
+ * programming the CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +1223,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
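+	/*
+	 * In check-only mode, calculate the output transfer function into a
+	 * scratch allocation so the stream's out_transfer_func is left
+	 * untouched during atomic check.
+	 */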
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +1245,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +1254,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +1268,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
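For clarity, a minimal sketch (not part of the patch) of how the check_only split is meant to be used: the atomic-check path validates against a scratch dc_transfer_func allocated inside the helper, while the commit path lets the helper write straight into stream->out_transfer_func. The example_* wrappers are illustrative names only.

static int example_crtc_color_check(struct dm_crtc_state *crtc)
{
	/* Validation only: TF parameters are computed into a temporary
	 * dc_transfer_func, so the live stream state stays untouched.
	 */
	return amdgpu_dm_check_crtc_color_mgmt(crtc, true);
}

static int example_crtc_color_commit(struct dm_crtc_state *crtc)
{
	/* Commit path: the same helper programs stream->out_transfer_func. */
	return amdgpu_dm_check_crtc_color_mgmt(crtc, false);
}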
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
@@ -1138,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
}
static int
+__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
+ struct drm_colorop_state *colorop_state)
+{
+ struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
+ struct drm_colorop *colorop = colorop_state->colorop;
+ struct drm_device *drm = colorop->dev;
+
+ if (colorop->type != DRM_COLOROP_1D_CURVE)
+ return -EINVAL;
+
+ if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
+ return -EINVAL;
+
+ if (colorop_state->bypass) {
+ tf->type = TF_TYPE_BYPASS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ int i = 0;
+
+ old_colorop = colorop;
+
+ /* 1st op: 1d curve - degamma */
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (!colorop_state)
+ return -EINVAL;
+
+ return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
+}
+
+static int
+__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_property_blob *blob;
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ int i = 0;
+
+ /* 3x4 matrix */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
+ drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
+ blob = colorop_state->data;
+ if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
+ ctm = (struct drm_color_ctm_3x4 *) blob->data;
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ drm_warn(dev, "blob->length (%zu) isn't equal to sizeof(struct drm_color_ctm_3x4) (%zu)\n",
+ blob->length, sizeof(struct drm_color_ctm_3x4));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
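As a reference for the blob check above, and assuming the standard struct drm_color_ctm_3x4 layout (12 S31.32 sign-magnitude values, row-major, with the fourth column of each row acting as a per-channel offset), a hypothetical identity blob could be filled like this; the helper name is illustrative only.

/* Hypothetical helper: fill an identity drm_color_ctm_3x4.
 * S31.32 sign-magnitude fixed point, so 1.0 == 1ULL << 32.
 */
static void example_fill_identity_ctm_3x4(struct drm_color_ctm_3x4 *ctm)
{
	int i;

	memset(ctm, 0, sizeof(*ctm));
	for (i = 0; i < 3; i++)
		ctm->matrix[i * 4 + i] = 1ULL << 32; /* diagonal = 1.0 */
	/* matrix[3], matrix[7] and matrix[11] are the per-channel offsets */
}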
+
+static int
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ int i = 0;
+
+ /* Multiplier */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
+ drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ const struct drm_color_lut32 *shaper_lut;
+ struct drm_device *dev = colorop->dev;
+ bool enabled = false;
+ u32 shaper_size;
+ int i = 0, ret = 0;
+
+ /* 1D Curve - SHAPER TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
+ drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf(tf, 0, 0, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+
+ /* 1D LUT - SHAPER LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
+ drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (shaper_size == colorop->size) {
+ ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+ }
+
+ if (!enabled)
+ tf->type = TF_TYPE_BYPASS;
+
+ return 0;
+}
+
+/* __set_colorop_3dlut - set DRM 3D LUT to DC plane
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut: DC 3D LUT
+ *
+ * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it
+ * on DCN accordingly.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if drm_lut3d_size is zero.
+ */
+static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ return -EINVAL;
+ }
+
+ /* Only supports 17x17x17 3D LUT (12-bit) now */
+ lut->lut_3d.use_12bits = true;
+ lut->lut_3d.use_tetrahedral_9 = false;
+
+ lut->state.bits.initialized = 1;
+ __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9, 12);
+
+ return 0;
+}
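A small worked example of the sizing implied by the 17x17x17 comment above (illustrative only; the EXAMPLE_* names are not in the patch): a 17-point cube carries 17 * 17 * 17 = 4913 drm_color_lut32 entries, which is the count a well-formed 3D LUT blob is expected to provide.

#define EXAMPLE_LUT3D_DIM 17
#define EXAMPLE_LUT3D_ENTRIES (EXAMPLE_LUT3D_DIM * EXAMPLE_LUT3D_DIM * EXAMPLE_LUT3D_DIM) /* 4913 */

/* Illustrative sanity check on the entry count of a user 3D LUT blob. */
static bool example_lut3d_size_ok(uint32_t drm_lut3d_size)
{
	return drm_lut3d_size == EXAMPLE_LUT3D_ENTRIES;
}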
+
+static int
+__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+ int i = 0, ret = 0;
+
+ /* 3D LUT */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+ if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
+ lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+ ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
+ if (ret) {
+ drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
+ colorop->base.id, lut3d_size);
+ return ret;
+ }
+
+ /* 3D LUT requires shaper. If shaper colorop is bypassed, enable shaper curve
+ * with TRANSFER_FUNCTION_LINEAR
+ */
+ if (tf->type == TF_TYPE_BYPASS) {
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf_32(tf, NULL, 0, false);
+ }
+ }
+
+ return ret;
+}
+
+static int
+__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
+ const struct drm_color_lut32 *blend_lut = NULL;
+ struct drm_device *dev = colorop->dev;
+ uint32_t blend_size = 0;
+ int i = 0;
+
+ /* 1D Curve - BLND TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ /* 1D Curve - BLND LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (blend_size == colorop->size)
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ return 0;
+}
+
+static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
@@ -1187,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
+static int
+amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+
+ /* 1D Curve - DEGAM TF */
+ if (!colorop)
+ return -EINVAL;
+
+ ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Multiplier */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no multiplier colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* 3x4 matrix */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3x4 matrix colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Shaper LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ /* 3D LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3D LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+ }
+
+ /* 1D Curve & LUT - BLND TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* BLND LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ return 0;
+}
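To make the traversal above easier to follow, a sketch of the fixed colorop order walked through colorop->next (the shaper and 3D LUT steps only exist when dpp.hw_3d_lut is set); this mirrors the pipeline built by amdgpu_dm_initialize_default_pipeline() below and is illustrative rather than additional patch code.

/* Illustrative summary of the plane color pipeline walked above. */
static const char * const example_plane_colorop_order[] = {
	"1D curve (degamma TF)",
	"multiplier",
	"3x4 CTM",
	"shaper TF",     /* only with dpp.hw_3d_lut */
	"shaper 1D LUT", /* only with dpp.hw_3d_lut */
	"3D LUT",        /* only with dpp.hw_3d_lut */
	"blend TF",
	"blend 1D LUT",
};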
+
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1283,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
+ if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
+ return 0;
+
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
new file mode 100644
index 000000000000..d585618b8064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_property.h>
+#include <drm/drm_colorop.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm_colorop.h"
+#include "dc.h"
+
+const u64 amdgpu_dm_supported_degam_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+const u64 amdgpu_dm_supported_shaper_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+
+const u64 amdgpu_dm_supported_blnd_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+#define MAX_COLOR_PIPELINE_OPS 10
+
+#define LUT3D_SIZE 17
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+ int i = 0;
+
+ memset(ops, 0, sizeof(ops));
+
+ /* 1D curve - DEGAM TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_degam_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* Multiplier */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3x4 matrix */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_shaper_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - SHAPER LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3D LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+ }
+
+ /* 1D curve - BLND TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_blnd_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - BLND LUT */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+ return 0;
+
+cleanup:
+ if (ret == -ENOMEM)
+ drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
+
+ drm_colorop_pipeline_destroy(dev);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
index d72b3aae9a2b..2e1617ffc8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +19,18 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-#include <nvif/class.h>
+#ifndef __AMDGPU_DM_COLOROP_H__
+#define __AMDGPU_DM_COLOROP_H__
-static const struct nvkm_engine_func
-ad102_nvdec = {
- .sclass = {
- { -1, -1, NVC9B0_VIDEO_DECODER },
- {}
- }
-};
+extern const u64 amdgpu_dm_supported_degam_tfs;
+extern const u64 amdgpu_dm_supported_shaper_tfs;
+extern const u64 amdgpu_dm_supported_blnd_tfs;
-int
-ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
- return -ENODEV;
-}
+#endif /* __AMDGPU_DM_COLOROP_H__*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 6fdc306a4a86..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -30,6 +31,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -295,33 +297,41 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_st
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool was_activated;
struct amdgpu_dm_connector *aconnector;
+ bool was_activated;
uint8_t phy_id;
+ unsigned long flags;
+ int i;
- spin_lock_irq(&drm_dev->event_lock);
- was_activated = acrtc->dm_irq_params.window_param.activated;
- acrtc->dm_irq_params.window_param.x_start = 0;
- acrtc->dm_irq_params.window_param.y_start = 0;
- acrtc->dm_irq_params.window_param.x_end = 0;
- acrtc->dm_irq_params.window_param.y_end = 0;
- acrtc->dm_irq_params.window_param.activated = false;
- acrtc->dm_irq_params.window_param.update_win = false;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- spin_unlock_irq(&drm_dev->event_lock);
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ was_activated = acrtc->dm_irq_params.crc_window_activated;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ acrtc->dm_irq_params.window_param[i].x_start = 0;
+ acrtc->dm_irq_params.window_param[i].y_start = 0;
+ acrtc->dm_irq_params.window_param[i].x_end = 0;
+ acrtc->dm_irq_params.window_param[i].y_end = 0;
+ acrtc->dm_irq_params.window_param[i].enable = false;
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
+ }
+ acrtc->dm_irq_params.crc_window_activated = false;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
/* Disable secure_display if it was enabled */
- if (was_activated) {
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
/* stop ROI update on this crtc */
flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
-
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (aconnector && get_phy_id(dm, aconnector, &phy_id))
- dc_stream_forward_crc_window(stream, NULL, phy_id, true);
- else
+ if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
+ else
+ dc_stream_forward_crc_window(stream, NULL, phy_id, true);
+ } else {
DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
+ }
}
}
@@ -335,7 +345,11 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
uint8_t phy_inst;
struct amdgpu_display_manager *dm;
+ struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t roi_idx = 0;
int ret;
+ int i;
crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
crtc = crtc_ctx->crtc;
@@ -364,18 +378,36 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
}
mutex_unlock(&crtc->dev->mode_config.mutex);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
/* need lock for multiple crtcs to use the command buffer */
mutex_lock(&psp->securedisplay_context.mutex);
-
- psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
- TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
-
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
-
/* PSP TA is expected to finish data transmission over I2C within current frame,
* even there are up to 4 crtcs request to send in this frame.
*/
- ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (dm->secure_display_ctx.support_mul_roi) {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;
+
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ if (crc_cpy[i].crc_ready)
+ roi_idx |= 1 << i;
+ }
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+ } else {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ }
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
@@ -393,6 +425,8 @@ amdgpu_dm_forward_crc_window(struct work_struct *work)
struct drm_crtc *crtc;
struct dc_stream_state *stream;
struct amdgpu_dm_connector *aconnector;
+ struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
uint8_t phy_id;
crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
@@ -416,9 +450,17 @@ amdgpu_dm_forward_crc_window(struct work_struct *work)
}
mutex_unlock(&crtc->dev->mode_config.mutex);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(stream, &crtc_ctx->rect,
- phy_id, false);
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, roi_cpy,
+ phy_id, false);
+ else
+ dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
+ phy_id, false);
mutex_unlock(&dm->dc_lock);
}
@@ -429,7 +471,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
- ret = acrtc->dm_irq_params.window_param.activated;
+ ret = acrtc->dm_irq_params.crc_window_activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
@@ -467,10 +509,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, NULL, enable, enable)) {
+ stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL;
goto unlock;
}
@@ -604,6 +650,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -614,16 +671,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
@@ -651,7 +699,8 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Initialize phy id mapping table for secure display*/
- if (!dm->secure_display_ctx.phy_mapping_updated)
+ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
+ !dm->secure_display_ctx.phy_mapping_updated)
update_phy_id_mapping(adev);
#endif
@@ -709,7 +758,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
}
if (dm_is_crc_source_crtc(cur_crc_src)) {
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2]))
return;
@@ -726,7 +775,16 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
struct secure_display_crtc_context *crtc_ctx = NULL;
+ bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
+ uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
unsigned long flags1;
+ bool forward_roi_change = false;
+ bool notify_ta = false;
+ bool all_crc_ready = true;
+ struct dc_stream_state *stream_state;
+ int i;
if (crtc == NULL)
return;
@@ -734,21 +792,21 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
acrtc = to_amdgpu_crtc(crtc);
adev = drm_to_adev(crtc->dev);
drm_dev = crtc->dev;
+ stream_state = to_dm_crtc_state(crtc->state)->stream;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
- !dm_is_crc_source_crtc(cur_crc_src))
- goto cleanup;
-
- if (!acrtc->dm_irq_params.window_param.activated)
- goto cleanup;
+ !dm_is_crc_source_crtc(cur_crc_src)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
+ }
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
+ if (!acrtc->dm_irq_params.crc_window_activated) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
}
crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
@@ -759,32 +817,110 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
crtc_ctx->crtc = crtc;
}
- if (acrtc->dm_irq_params.window_param.update_win) {
- /* prepare work for dmub to update ROI */
- crtc_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
- crtc_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
- crtc_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
- acrtc->dm_irq_params.window_param.x_start;
- crtc_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
- acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&crtc_ctx->forward_roi_work);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ struct crc_params crc_window = {
+ .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ };
+
+ crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
+
+ if (!acrtc->dm_irq_params.window_param[i].enable) {
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- acrtc->dm_irq_params.window_param.update_win = false;
+ if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- /* Statically skip 1 frame, because we may need to wait below things
- * before sending ROI to dmub:
- * 1. We defer the work by using system workqueue.
- * 2. We may need to wait for dc_lock before accessing dmub.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+ if (acrtc->dm_irq_params.window_param[i].update_win) {
+ crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
+ crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
+ crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
+ crc_window.windowa_y_start;
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to dmub to update ROI */
+ forward_roi_change = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* update ROI via dm*/
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ reset_crc_frame_count[i] = true;
+
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the
+ * following before sending the ROI to dmub:
+ * 1. We defer the work by using system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ } else {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
+ &crc_r[i], &crc_g[i], &crc_b[i]))
+ DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to psp to read ROI/CRC and output via I2C */
+ notify_ta = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* Avoid the ROI window being changed; keep overwriting it. */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ /* crc ready for psp to read out */
+ crtc_ctx->crc_info.crc[i].crc_ready = true;
+ }
+ }
- } else {
- /* prepare work for psp to read ROI/CRC and send to I2C */
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+
+ if (forward_roi_change)
+ schedule_work(&crtc_ctx->forward_roi_work);
+
+ if (notify_ta)
schedule_work(&crtc_ctx->notify_ta_work);
+
+ spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
+ crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
+ crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
+
+ if (!crtc_ctx->roi[i].enable) {
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ continue;
+ }
+
+ if (!crtc_ctx->crc_info.crc[i].crc_ready)
+ all_crc_ready = false;
+
+ if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
+ /* Reset the reference frame count after the user updates the ROI
+ * or it reaches the maximum value.
+ */
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ else
+ crtc_ctx->crc_info.crc[i].frame_count += 1;
}
+ spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
-cleanup:
- spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ if (all_crc_ready)
+ complete_all(&crtc_ctx->crc_info.completion);
}
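To illustrate the handoff introduced here (a hedged sketch, not code from the patch): the vblank handler fills crc_info under crc_info.lock and signals crc_info.completion once every enabled window has a ready CRC; a reader is then expected to wait on the completion and copy the results out under the same lock. The example_* function is hypothetical; the fields and MAX_CRC_WINDOW_NUM come from this series.

/* Hypothetical consumer of the per-CRTC CRC results produced above. */
static int example_read_window_crcs(struct secure_display_crtc_context *crtc_ctx,
				    struct crc_data out[MAX_CRC_WINDOW_NUM])
{
	unsigned long flags;

	if (!wait_for_completion_timeout(&crtc_ctx->crc_info.completion,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags);
	memcpy(out, crtc_ctx->crc_info.crc, sizeof(crtc_ctx->crc_info.crc));
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags);

	return 0;
}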
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
@@ -805,9 +941,11 @@ void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
+ spin_lock_init(&crtc_ctx[i].crc_info.lock);
}
adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
- return;
+
+ adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 4ec2510bc312..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -42,6 +43,14 @@ enum amdgpu_dm_pipe_crc_source {
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
#define MAX_CRTC 6
+enum secure_display_mode {
+ /* via dmub + psp */
+ LEGACY_MODE = 0,
+ /* driver directly */
+ DISPLAY_CRC_MODE,
+ SECURE_DISPLAY_MODE_MAX,
+};
+
struct phy_id_mapping {
bool assigned;
bool is_mst;
@@ -51,13 +60,27 @@ struct phy_id_mapping {
u8 rad[8];
};
+struct crc_data {
+ uint32_t crc_R;
+ uint32_t crc_G;
+ uint32_t crc_B;
+ uint32_t frame_count;
+ bool crc_ready;
+};
+
+struct crc_info {
+ struct crc_data crc[MAX_CRC_WINDOW_NUM];
+ struct completion completion;
+ spinlock_t lock;
+};
+
struct crc_window_param {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is activated or not*/
- bool activated;
+ bool enable;
/* Update crc window during vertical blank or not */
bool update_win;
/* skip reading/writing for few frames */
@@ -74,13 +97,17 @@ struct secure_display_crtc_context {
struct drm_crtc *crtc;
/* Region of Interest (ROI) */
- struct rect rect;
+ struct crc_window roi[MAX_CRC_WINDOW_NUM];
+
+ struct crc_info crc_info;
};
struct secure_display_context {
struct secure_display_crtc_context *crtc_ctx;
-
+ /* Whether dmub supports multiple ROI settings */
+ bool support_mul_roi;
+ enum secure_display_mode op_mode;
bool phy_mapping_updated;
int phy_id_mapping_cnt;
struct phy_id_mapping phy_id_mapping[MAX_CRTC];
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..697e232acebf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
+ * - VRR is disabled
* - vblank counter is disabled
* - entry is allowed: usermode demonstrates an adequate number of fast
* commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
- if (link->replay_settings.replay_feature_enabled &&
+ if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
- } else if (link->psr_settings.psr_feature_enabled &&
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
+ } else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@@ -216,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -244,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -271,9 +277,20 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -291,18 +308,45 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+ * The IPS & self-refresh features can cause vblank counter resets between
+ * vblank disable and enable.
+ * This may leave the system stuck waiting for the vblank counter.
+ * Call drm_crtc_vblank_restore() to estimate the missed vblanks from
+ * timestamps and update the vblank counter in DRM.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+ }
+
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
@@ -354,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -659,6 +703,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
@@ -683,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
- bool is_dcn;
+ bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -722,11 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- /* Don't enable DRM CRTC degamma property for DCE since it doesn't
- * support programmable degamma anywhere.
+ /* Don't enable the DRM CRTC degamma property when:
+ * 1. Degamma is replaced by the colorop pipeline.
+ * 2. Running on DCE, since it doesn't support programmable degamma anywhere.
+ * 3. Running on DCN401, since the pre-blending degamma LUT doesn't apply to the cursor.
*/
- is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ if (plane->color_pipeline_property)
+ has_degamma = false;
+ else
+ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index bc3e7a82b402..a9839485f2a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,6 +26,7 @@
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
+#include <media/cec-notifier.h>
#include "dc.h"
#include "amdgpu.h"
@@ -757,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -848,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD * sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -886,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
@@ -1168,7 +1171,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
case COLOR_SPACE_2020_RGB_FULLRANGE:
seq_puts(m, "BT2020_RGB");
break;
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
seq_puts(m, "BT2020_YCC");
break;
default:
@@ -1300,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
@@ -2848,6 +2852,67 @@ static int is_dpia_link_show(struct seq_file *m, void *data)
return 0;
}
+/**
+ * hdmi_cec_state_show - Read out the HDMI-CEC feature status
+ * @m: sequence file.
+ * @data: unused.
+ *
+ * Return 0 on success
+ */
+static int hdmi_cec_state_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "%s:%d\n", connector->name, connector->base.id);
+ seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0);
+
+ return 0;
+}
+
+/**
+ * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side
+ * @f: file structure.
+ * @buf: userspace buffer. set to '1' to enable; '0' to disable cec feature.
+ * @size: size of buffer from userspace.
+ * @pos: unused.
+ *
+ * Return size on success, error code on failure
+ */
+static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int ret;
+ bool enable;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_device *ddev = aconnector->base.dev;
+
+ if (size == 0)
+ return -EINVAL;
+
+ ret = kstrtobool_from_user(buf, size, &enable);
+ if (ret) {
+ drm_dbg_driver(ddev, "invalid user data!\n");
+ return ret;
+ }
+
+ if (enable) {
+ if (aconnector->notifier)
+ return -EINVAL;
+ ret = amdgpu_dm_initialize_hdmi_connector(aconnector);
+ if (ret)
+ return ret;
+ hdmi_cec_set_edid(aconnector);
+ } else {
+ if (!aconnector->notifier)
+ return -EINVAL;
+ cec_notifier_conn_unregister(aconnector->notifier);
+ aconnector->notifier = NULL;
+ }
+
+ return size;
+}
+
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2860,6 +2925,7 @@ DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
+DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2995,7 +3061,8 @@ static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
- {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+ {"hdmi_cec_state", &hdmi_cec_state_fops}
};
/*
@@ -3042,6 +3109,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / stop capturing Replay residency
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3260,7 +3356,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3438,6 +3535,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
@@ -3480,8 +3579,8 @@ static int crc_win_x_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3497,7 +3596,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_start;
+ *val = acrtc->dm_irq_params.window_param[0].x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3517,8 +3616,8 @@ static int crc_win_y_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3534,7 +3633,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_start;
+ *val = acrtc->dm_irq_params.window_param[0].y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3553,8 +3652,8 @@ static int crc_win_x_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3570,7 +3669,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_end;
+ *val = acrtc->dm_irq_params.window_param[0].x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3589,8 +3688,8 @@ static int crc_win_y_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3606,7 +3705,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_end;
+ *val = acrtc->dm_irq_params.window_param[0].y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3629,13 +3728,14 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- acrtc->dm_irq_params.window_param.activated = true;
- acrtc->dm_irq_params.window_param.update_win = true;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param[0].enable = true;
+ acrtc->dm_irq_params.window_param[0].update_win = true;
+ acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0;
+ acrtc->dm_irq_params.crc_window_activated = true;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
@@ -3923,7 +4023,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
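For reference, the new replay_residency debugfs entry uses the standard %llu attribute format: writing a non-zero value starts residency capture, writing zero stops it, and a read returns the accumulated counter. Below is a minimal userspace sketch of that flow; it is not part of the patch, the connector path under /sys/kernel/debug/dri/ is an assumption, and because the file is created with mode 0444 the writes require root (CAP_DAC_OVERRIDE).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption; adjust card index and connector name. */
	const char *path = "/sys/kernel/debug/dri/0/eDP-1/replay_residency";
	char buf[32] = {0};
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;

	write(fd, "1", 1);              /* start residency capture */
	sleep(5);                       /* let the panel sit in Replay */
	write(fd, "0", 1);              /* stop capture */

	lseek(fd, 0, SEEK_SET);
	read(fd, buf, sizeof(buf) - 1); /* returns the counter as "%llu\n" */
	printf("replay residency: %s", buf);

	close(fd);
	return 0;
}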
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..85ce558cefc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -26,6 +27,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +78,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -171,8 +201,12 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_link_adjustment link_adjust;
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
+ const struct dc *dc = aconnector->dc_link->dc;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -190,6 +224,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -197,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
link_adjust.hdcp1.disable = 1;
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
+ link_adjust.hdcp2.use_fw_locality_check =
+ (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -209,7 +247,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,8 +257,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector[conn_index] = aconnector;
+ guard(mutex)(&hdcp_w->mutex);
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -237,9 +273,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +285,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -256,11 +294,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
}
process_output(hdcp_w);
-
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +317,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +325,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +361,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +393,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
- mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +405,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +445,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +455,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +464,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +472,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -455,6 +486,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
@@ -469,7 +501,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- bool res = true;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
@@ -478,7 +509,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
- mutex_lock(&psp->dtm_context.mutex);
+ guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -490,12 +521,10 @@ static bool enable_assr(void *handle, struct dc_link *link)
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
DRM_INFO("Failed to enable ASSR");
- res = false;
+ return false;
}
- mutex_unlock(&psp->dtm_context.mutex);
-
- return res;
+ return true;
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -503,11 +532,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
+ unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
+ const struct dc *dc = aconnector->dc_link->dc;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -547,7 +578,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
+ link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
@@ -556,13 +590,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
-
}
/**
@@ -614,7 +649,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -638,7 +673,7 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
}
static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -736,19 +771,30 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
+ config->psp.caps.dtm_v3_supported = 1;
+
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
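The locking changes in this file replace open-coded mutex_lock()/mutex_unlock() pairs with the scope-based guard(mutex)() helper from <linux/cleanup.h>, which releases the lock on every exit path and is what allows enable_assr() to drop its local result variable and return early. A minimal standalone sketch of the pattern, with a hypothetical example_lock and do_work_locked() used purely for illustration:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_lock);

static bool do_work_locked(bool fail)
{
	guard(mutex)(&example_lock);	/* dropped automatically at scope exit */

	if (fail)
		return false;		/* no explicit mutex_unlock() needed */

	return true;
}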
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6cbbb71d752b..ac98c746c3de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -55,16 +56,21 @@ static u32 edid_extract_panel_id(struct edid *edid)
(u32)EDID_PRODUCT_ID(edid);
}
-static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
{
uint32_t panel_id = edid_extract_panel_id(edid);
switch (panel_id) {
+ /* Workaround for monitors that need a delay after detecting the link */
+ case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
+ drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
+ edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
+ break;
/* Workaround for some monitors which does not work well with FAMS */
case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
- DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
@@ -73,11 +79,12 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
- DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
- DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
+ drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
default:
@@ -101,6 +108,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
@@ -123,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
@@ -130,7 +139,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
- apply_edid_quirks(edid_buf, edid_caps);
+ apply_edid_quirks(dev, edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
@@ -624,6 +633,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -885,6 +907,12 @@ bool dm_helpers_dp_write_dsc_enable(
return ret;
}
+bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
bool dp_sink_present;
@@ -906,7 +934,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
- unsigned char start = block * EDID_LENGTH;
+ unsigned short start = block * EDID_LENGTH;
struct edid *edid;
int r;
@@ -970,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
- int retry = 3;
- enum dc_edid_status edid_status;
+ int retry = 25;
+ enum dc_edid_status edid_status = EDID_NO_RESPONSE;
const struct drm_edid *drm_edid;
const struct edid *edid;
@@ -1001,9 +1029,13 @@ enum dc_edid_status dm_helpers_read_local_edid(
}
if (!drm_edid)
- return EDID_NO_RESPONSE;
+ continue;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+ if (!edid ||
+ edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
+ return EDID_BAD_INPUT;
+
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
@@ -1015,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
&sink->dc_edid,
&sink->edid_caps);
- } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+ } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
if (edid_status != EDID_OK)
DRM_ERROR("EDID err: %d, on connector: %s",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -473,8 +474,9 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
unregister_all_irq_handlers(adev);
}
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -511,10 +513,12 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -522,7 +526,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");
/* re-enable short pulse interrupts HW interrupt */
for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
@@ -533,19 +537,18 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- return 0;
}
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");
/**
* Renable HW interrupt for HPD and only since FLIP and VBLANK
@@ -559,7 +562,9 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -894,12 +899,24 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
+ int i;
+ bool use_polling = false;
+
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -907,10 +924,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fall back to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed to get HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -920,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -935,6 +976,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -948,9 +990,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed to put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -960,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
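The HPD handling above boils down to one decision per link: HPD sources whose index fits within adev->mode_info.num_hpd take a base-driver interrupt reference via amdgpu_irq_get()/amdgpu_irq_put(), while the remaining sources (and all hpd_rx sources) keep using dc_interrupt_set() directly. A condensed restatement of that logic as a single hypothetical helper, assuming the usual amdgpu_dm headers are available:

#include "amdgpu_dm.h"		/* assumed context: amdgpu_device, dc_interrupt_set() */
#include "amdgpu_dm_irq.h"

static void example_hpd_set(struct amdgpu_device *adev,
			    const struct dc_link *dc_link, bool enable)
{
	int irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

	if (irq_type < adev->mode_info.num_hpd) {
		/* HPD source registered with the base driver: refcounted get/put. */
		if (enable)
			amdgpu_irq_get(adev, &adev->hpd_irq, irq_type);
		else
			amdgpu_irq_put(adev, &adev->hpd_irq, irq_type);
	} else {
		/* No base-driver registration: program DC directly, as before. */
		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, enable);
	}
}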
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index 2349238a626b..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -90,14 +91,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
*
*/
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6a7ecc1e4602..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
@@ -39,7 +40,9 @@ struct dm_irq_params {
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- struct crc_window_param window_param;
+ struct crc_window_param window_param[MAX_CRC_WINDOW_NUM];
+ /* Whether at least one CRC window is activated */
+ bool crc_window_activated;
#endif
#endif
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 56fa2b9b5d7a..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -51,6 +52,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-Over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -59,6 +63,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +79,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -87,15 +97,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+ * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+ /* one byte indicating the number of partially written bytes */
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
+ result = payload.data[0];
+ } else if (!payload.reply[0])
+ /* I2C_ACK | AUX_ACK */
+ result = msg->size;
+ }
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +134,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
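For DP AUX writes the payload buffer may be modified by the lower layers, hence the local copy[] above, and the raw transfer result needs remapping: a positive value means the sink NACKed mid-transfer and payload.data[0] carries the partially written byte count, while 0 with an ACK reply means the whole message was accepted. A small sketch of that mapping with a hypothetical example_map_write_result() helper:

#include <linux/types.h>

static ssize_t example_map_write_result(ssize_t raw, const u8 *data,
					const u8 *reply, size_t msg_size)
{
	if (raw < 0)
		return raw;	/* transport error, handled separately */

	if (raw)		/* sink NACKed part-way through the write */
		return data[0];	/* byte count reported by the sink */

	if (!reply[0])		/* AUX_ACK / I2C_ACK */
		return msg_size;

	return 0;		/* AUX_DEFER / I2C_DEFER: nothing accepted yet */
}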
@@ -155,6 +182,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +220,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -294,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -504,9 +568,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -635,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connectror to drm and
* framebuffer lists
@@ -776,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
+ drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false);
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
return;
@@ -788,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
+ return dfixed_const(pbn_div_x100) / 100;
}
struct dsc_mst_fairness_params {
@@ -811,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -901,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -930,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1031,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1064,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1124,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1205,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1227,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1235,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1618,7 +1690,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (ind >= 0) {
struct drm_connector *connector;
- struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;
@@ -1626,15 +1697,17 @@ int pre_validate_dsc(struct drm_atomic_state *state,
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
- aconnector = to_amdgpu_dm_connector(connector);
+ if (!connector)
+ continue;
+
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
- &aconnector->base);
+ connector);
dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
local_dc_state->streams[i] =
- create_validate_stream_for_sink(aconnector,
+ create_validate_stream_for_sink(connector,
&state->crtcs[ind].new_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -1689,32 +1762,21 @@ clean_exit:
return ret;
}
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
- uint64_t kbps = (uint64_t)pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return (uint32_t)kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
@@ -1727,14 +1789,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
@@ -1792,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1820,10 +1888,21 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ /* Get last DP link BW capability. The mode must be supported by the legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1834,13 +1913,17 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For the LCT 1 topology case - only one MSTB */
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
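The DSC fairness rework replaces the old FEC-multiplier helpers with kbps_to_pbn()/pbn_to_kbps(), built on the MST payload bandwidth unit of 54/64 MBps per PBN, i.e. pbn = ceil(kbps * 64 / (54 * 8 * 1000)), optionally padded by the 0.6% (1006/1000) margin. A standalone worked example (hypothetical example_kbps_to_pbn(), userspace C) that reproduces the arithmetic for a 4-lane HBR2 link:

#include <stdint.h>
#include <stdio.h>

static uint64_t example_kbps_to_pbn(uint64_t kbps, int with_margin)
{
	if (with_margin)
		kbps = kbps * 1006 / 1000;	/* add the 0.6% overhead */

	/* pbn = ceil(kbps * 64 / (54 * 8 * 1000)) */
	return (kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000);
}

int main(void)
{
	/* 4-lane HBR2 main link: 4 x 5.4 Gbps x 8b/10b = 17.28 Gbps of payload */
	uint64_t link_kbps = 17280000;

	printf("PBN without margin: %llu\n",
	       (unsigned long long)example_kbps_to_pbn(link_kbps, 0));	/* 2560 */
	printf("PBN with 0.6%% margin: %llu\n",
	       (unsigned long long)example_kbps_to_pbn(link_kbps, 1));	/* 2576 */
	return 0;
}

The numbers line up with the familiar figure of 2560 PBN for an HBR2 x4 main link; the margin variant simply inflates the requested bandwidth by 0.6% before the conversion.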
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 1ec4e4b9ea94..2e3ee78999d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -92,9 +93,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
@@ -146,7 +147,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -177,7 +178,7 @@ static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier
return AMD_FMT_MOD_GET(TILE, modifier);
}
-static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
@@ -190,6 +191,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ tiling_info->gfxversion = DcGfxVersion8;
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
@@ -210,7 +212,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -231,7 +233,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgp
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
@@ -261,7 +263,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amd
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union dc_tiling_info *tiling_info,
+ const struct dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
@@ -276,8 +278,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
@@ -308,7 +313,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
@@ -317,6 +322,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxVersion9;
if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
@@ -358,7 +364,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
@@ -369,6 +375,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxAddr3;
if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
@@ -694,7 +701,7 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
- uint8_t max_comp_block[] = {1, 0};
+ uint8_t max_comp_block[] = {2, 1, 0};
uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
uint8_t i = 0, j = 0;
uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
@@ -726,7 +733,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -834,7 +841,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
@@ -942,13 +949,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
@@ -1255,21 +1262,24 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
}
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_crtc_state *new_crtc_state;
struct drm_plane_state *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state;
- /* Only support async updates on cursor planes. */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (flip) {
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -EINVAL;
+ } else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
return -EINVAL;
+ }
new_plane_state = drm_atomic_get_new_plane_state(state, plane);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/* Reject overlay cursors for now*/
- if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
return -EINVAL;
return 0;
@@ -1427,7 +1437,7 @@ static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
dc_plane_state = dm_plane_state->dc_state;
- dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false);
+ dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
@@ -1624,7 +1634,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
@@ -1773,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+#else
+
+#define MAX_COLOR_PIPELINES 5
+
+static int
+dm_plane_init_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int len = 0;
+ int ret;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+
+ /* initialize pipeline */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
+ ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
+ if (ret) {
+ drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
+ plane->base.id, ret);
+ return ret;
+ }
+ len++;
+
+ /* Create COLOR_PIPELINE property and attach */
+ drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ }
+
+ return 0;
+}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@@ -1881,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#else
+ res = dm_plane_init_colorops(plane);
+ if (res)
+ return res;
#endif
+
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 2eef13b1c05a..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -47,7 +47,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
@@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
@@ -86,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
@@ -126,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
@@ -201,14 +196,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +245,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+ for (retry_count = 0; retry_count <= 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count > 1000)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -34,8 +35,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..da94e3544b65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
@@ -161,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
if (link) {
link->dc->link_srv->edp_setup_replay(link, stream);
- link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0);
DRM_DEBUG_DRIVER("Enabling replay...\n");
link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+//SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+//SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -726,6 +727,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 8992e697759f..7277ed21552f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -36,7 +36,8 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
-DC_LIBS += dml2
+DC_LIBS += dml2_0
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
@@ -52,32 +53,31 @@ endif
DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
-DC_LIBS += spl
-DC_SPL_TRANS += dc_spl_translate.o
+DC_LIBS += sspl
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o)
endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
+FILES =
+FILES += dc_dmub_srv.o
+FILES += dc_edid_parser.o
+FILES += dc_fused_io.o
+FILES += dc_helper.o
+FILES += core/dc.o
+FILES += core/dc_debug.o
+FILES += core/dc_hw_sequencer.o
+FILES += core/dc_link_enc_cfg.o
+FILES += core/dc_link_exports.o
+FILES += core/dc_resource.o
+FILES += core/dc_sink.o
+FILES += core/dc_stat.o
+FILES += core/dc_state.o
+FILES += core/dc_stream.o
+FILES += core/dc_surface.o
+FILES += core/dc_vm_helper.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES))
-DISPLAY_CORE += dc_vm_helper.o
-
-AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
-
-AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
-
-AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
-
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
-AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-DC_DMUB += dc_dmub_srv.o
-DC_EDID += dc_edid_parser.o
-AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
-
-AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..a51c2701da24 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -40,7 +40,8 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
return false;
default:
/* Add a case to switch */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 681799468487..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
@@ -1393,7 +1393,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
@@ -1407,7 +1407,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 88d3f9d7dd55..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
-
result = div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
* @note
* Good idea to use Newton's method
*/
-
- ASSERT(arg.value);
-
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
@@ -289,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 3bacf470f7c5..d1471f34e419 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
ATOM_OBJECT *object);
static struct device_id device_type_from_device_id(uint16_t device_id);
static uint32_t signal_to_ss_id(enum as_signal_type signal);
-static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id);
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
ATOM_OBJECT *object);
@@ -174,11 +176,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
return object_id;
}
- if (tbl->ucNumberOfObjects <= i) {
- dm_error("Can't find connector id %d in connector table of size %d.\n",
- i, tbl->ucNumberOfObjects);
+ if (tbl->ucNumberOfObjects <= i)
return object_id;
- }
id = le16_to_cpu(tbl->asObjects[i].usObjectID);
object_id = object_id_from_bios_object_id(id);
@@ -444,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
info->pll_info.max_output_pxl_clk_pll_frequency =
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
/* Since there is no information on the SS, report conservative
@@ -500,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
info->external_clock_source_frequency_for_dp =
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+ info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
/* There should be only one entry in the SS info table for Memory Clock
*/
@@ -739,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
return bp->cmd_tbl.transmitter_control(bp, cntl);
}
+static enum bp_result bios_parser_select_crtc_source(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
static enum bp_result bios_parser_encoder_control(
struct dc_bios *dcb,
struct bp_encoder_control *cntl)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
+ if (cntl->engine_id == ENGINE_ID_DACA) {
+ if (!bp->cmd_tbl.dac1_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac1_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ } else if (cntl->engine_id == ENGINE_ID_DACB) {
+ if (!bp->cmd_tbl.dac2_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac2_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ }
+
if (!bp->cmd_tbl.dig_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}
+static enum bp_result bios_parser_dac_load_detection(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct dc_context *ctx = dcb->ctx;
+ struct bp_load_detection_parameters bp_params = {0};
+ enum bp_result bp_result;
+ uint32_t bios_0_scratch;
+ uint32_t device_id_mask = 0;
+
+ bp_params.engine_id = engine_id;
+ bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
+
+ if (engine_id != ENGINE_ID_DACA &&
+ engine_id != ENGINE_ID_DACB)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (!bp->cmd_tbl.dac_load_detection)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
+ device_id_mask = ATOM_S0_CRT1_MASK;
+ else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
+ device_id_mask = ATOM_S0_CRT2_MASK;
+ else
+ return BP_RESULT_UNSUPPORTED;
+
+ /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+ bios_0_scratch &= ~device_id_mask;
+ dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
+
+ bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
+
+ if (bp_result != BP_RESULT_OK)
+ return bp_result;
+
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+
+ if (bios_0_scratch & device_id_mask)
+ return BP_RESULT_OK;
+
+ return BP_RESULT_FAILURE;
+}
+
static enum bp_result bios_parser_adjust_pixel_clock(
struct dc_bios *dcb,
struct bp_adjust_pixel_clock_parameters *bp_params)
@@ -861,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- uint32_t mask = get_support_mask_for_device_id(id);
+ uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}
@@ -2152,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
return clk_id_ss;
}
-static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id)
{
- enum dal_device_type device_type = device_id.device_type;
- uint32_t enum_id = device_id.enum_id;
-
switch (device_type) {
case DEVICE_TYPE_LCD:
switch (enum_id) {
@@ -2384,10 +2460,10 @@ static enum bp_result get_integrated_info_v8(
}
/*
- * get_integrated_info_v8
+ * get_integrated_info_v9
*
* @brief
- * Get V8 integrated BIOS information
+ * Get V9 integrated BIOS information
*
* @param
* bios_parser *bp - [in]BIOS parser handler to get master data table
@@ -2832,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_device_id_supported = bios_parser_is_device_id_supported,
/* COMMANDS */
+ .select_crtc_source = bios_parser_select_crtc_source,
+
.encoder_control = bios_parser_encoder_control,
+ .dac_load_detection = bios_parser_dac_load_detection,
+
.transmitter_control = bios_parser_transmitter_control,
.enable_crtc = bios_parser_enable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index c9a6de110b74..550a9f1d03f8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1(
/* not provided by VBIOS */
info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
- info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_HSYNC_POLARITY);
- info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_VSYNC_POLARITY);
+ info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_VSYNC_POLARITY);
/* not provided by VBIOS */
info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
@@ -1778,6 +1778,7 @@ static enum bp_result get_firmware_info_v3_1(
struct dc_firmware_info *info)
{
struct atom_firmware_info_v3_1 *firmware_info;
+ struct atom_firmware_info_v3_2 *firmware_info32;
struct atom_display_controller_info_v4_1 *dce_info = NULL;
if (!info)
@@ -1785,11 +1786,13 @@ static enum bp_result get_firmware_info_v3_1(
firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
DATA_TABLES(firmwareinfo));
+ firmware_info32 = GET_IMAGE(struct atom_firmware_info_v3_2,
+ DATA_TABLES(firmwareinfo));
dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
DATA_TABLES(dce_info));
- if (!firmware_info || !dce_info)
+ if (!firmware_info || !firmware_info32 || !dce_info)
return BP_RESULT_BADBIOSTABLE;
memset(info, 0, sizeof(*info));
@@ -1817,7 +1820,15 @@ static enum bp_result get_firmware_info_v3_1(
bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
- info->oem_i2c_present = false;
+ /* These fields are marked as reserved in v3_1, but they appear to be populated
+ * properly.
+ */
+ if (firmware_info32 && firmware_info32->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info32->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
return BP_RESULT_OK;
}
@@ -3088,11 +3099,12 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
- if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
- DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ if ((info->ext_disp_conn_info.path[i].caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
- info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
- DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ info->ext_disp_conn_info.path[i].caps &= ~AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ info->ext_disp_conn_info.path[i].caps |= AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
}
}
// Log the Checksum and Voltage Swing
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bcae0643e61..22457f417e65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
static void init_set_pixel_clock(struct bios_parser *bp);
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_load_detection(struct bios_parser *bp);
static void init_dac_output_control(struct bios_parser *bp);
static void init_set_crtc_timing(struct bios_parser *bp);
static void init_enable_crtc(struct bios_parser *bp);
@@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_pixel_clock(bp);
init_enable_spread_spectrum_on_ppll(bp);
init_adjust_display_pll(bp);
+ init_select_crtc_source(bp);
init_dac_encoder_control(bp);
+ init_dac_load_detection(bp);
init_dac_output_control(bp);
init_set_crtc_timing(bp);
init_enable_crtc(bp);
@@ -993,7 +997,7 @@ static enum bp_result set_pixel_clock_v3(
allocation.sPCLKInput.usFbDiv =
cpu_to_le16((uint16_t)bp_params->feedback_divider);
allocation.sPCLKInput.ucFracFbDiv =
- (uint8_t)bp_params->fractional_feedback_divider;
+ (uint8_t)(bp_params->fractional_feedback_divider / 100000);
allocation.sPCLKInput.ucPostDiv =
(uint8_t)bp_params->pixel_clock_post_divider;
@@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3(
/*******************************************************************************
********************************************************************************
**
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 1:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PS_ALLOCATION params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ switch (bp_params->engine_id) {
+ case ENGINE_ID_DACA:
+ params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENGINE_ID_DACB:
+ params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static bool select_crtc_source_v2_encoder_id(
+ enum engine_id engine_id, uint8_t *out_encoder_id)
+{
+ uint8_t encoder_id = 0;
+
+ switch (engine_id) {
+ case ENGINE_ID_DIGA:
+ encoder_id = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGB:
+ encoder_id = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGC:
+ encoder_id = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGD:
+ encoder_id = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGE:
+ encoder_id = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGF:
+ encoder_id = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGG:
+ encoder_id = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACA:
+ encoder_id = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACB:
+ encoder_id = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_id = encoder_id;
+ return true;
+}
+
+static bool select_crtc_source_v2_encoder_mode(
+ enum signal_type signal_type, uint8_t *out_encoder_mode)
+{
+ uint8_t encoder_mode = 0;
+
+ switch (signal_type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ encoder_mode = ATOM_ENCODER_MODE_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ encoder_mode = ATOM_ENCODER_MODE_HDMI;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ encoder_mode = ATOM_ENCODER_MODE_LVDS;
+ break;
+ case SIGNAL_TYPE_RGB:
+ encoder_mode = ATOM_ENCODER_MODE_CRT;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ encoder_mode = ATOM_ENCODER_MODE_DP_MST;
+ break;
+ case SIGNAL_TYPE_EDP:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_mode = encoder_mode;
+ return true;
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ params.ucDstBpc = bp_params->bit_depth;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC ENCODER CONTROL
**
********************************************************************************
@@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1(
/*******************************************************************************
********************************************************************************
**
+ ** DAC LOAD DETECTION
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static void init_dac_load_detection(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
+ case 1:
+ case 2:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
+ break;
+ case 3:
+ default:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
+ break;
+ }
+}
+
+static void dac_load_detect_prepare_params(
+ struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
+ enum engine_id engine_id,
+ uint16_t device_id,
+ uint8_t misc)
+{
+ uint8_t dac_type = ATOM_DAC_A;
+
+ if (engine_id == ENGINE_ID_DACB)
+ dac_type = ATOM_DAC_B;
+
+ params->sDacload.usDeviceID = cpu_to_le16(device_id);
+ params->sDacload.ucDacType = dac_type;
+ params->sDacload.ucMisc = misc;
+}
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ 0);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ uint8_t misc = 0;
+
+ if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
+ bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
+ misc = DAC_LOAD_MISC_YPrPb;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ misc);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC OUTPUT CONTROL
**
********************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index ad533775e724..e89b1ba0048b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -52,6 +52,9 @@ struct cmd_tbl {
enum bp_result (*adjust_display_pll)(
struct bios_parser *bp,
struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
@@ -68,6 +71,9 @@ struct cmd_tbl {
enum bp_result (*dac2_output_control)(
struct bios_parser *bp,
bool enable);
+ enum bp_result (*dac_load_detection)(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
enum bp_result (*set_crtc_timing)(
struct bios_parser *bp,
struct bp_hw_crtc_timing_parameters *bp_params);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 7d18f372ce7a..f2b1720a6a66 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
- dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
@@ -210,6 +209,7 @@ static enum bp_result encoder_control_fallback(
******************************************************************************
*****************************************************************************/
+
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl);
@@ -238,7 +238,6 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
break;
default:
- dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
@@ -325,6 +324,21 @@ static void transmitter_control_dmcub_v1_7(
dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static struct dc_link *get_link_by_phy_id(struct dc *p_dc, uint32_t phy_id)
+{
+ struct dc_link *link = NULL;
+
+ // Get Transition Bitmask from dc_link structure associated with PHY
+ for (uint8_t link_id = 0; link_id < MAX_LINKS; link_id++) {
+ if (phy_id == p_dc->links[link_id]->link_enc->transmitter) {
+ link = p_dc->links[link_id];
+ break;
+ }
+ }
+
+ return link;
+}
+
static enum bp_result transmitter_control_v1_7(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
@@ -363,7 +377,38 @@ static enum bp_result transmitter_control_v1_7(
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params = {0};
+ struct dc_link *link = get_link_by_phy_id(bp->base.ctx->dc, dig_v1_7.phyid);
+ bool is_phy_transition_interlock_allowed = false;
+ uint8_t action = dig_v1_7.action;
+
+ if (link) {
+ if (link->phy_transition_bitmask &&
+ (action == TRANSMITTER_CONTROL_ENABLE || action == TRANSMITTER_CONTROL_DISABLE)) {
+ is_phy_transition_interlock_allowed = true;
+
+ // Prepare input parameters for processing ACPI retimers
+ process_phy_transition_init_params.action = action;
+ process_phy_transition_init_params.display_port_lanes_count = cntl->lanes_number;
+ process_phy_transition_init_params.phy_id = dig_v1_7.phyid;
+ process_phy_transition_init_params.signal = cntl->signal;
+ process_phy_transition_init_params.sym_clock_10khz = dig_v1_7.symclk_units.symclk_10khz;
+ process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
+ process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
+ }
+ dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction;
+ }
+
+ // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_ENABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
transmitter_control_dmcub_v1_7(bp->base.ctx->dmub_srv, &dig_v1_7);
+
+ // Handle POST_ON_TO_OFF: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_DISABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
return BP_RESULT_OK;
}
@@ -408,8 +453,6 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
- dm_output_to_console("Don't have set_pixel_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
@@ -554,7 +597,6 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
- dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -671,8 +713,6 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
- dm_output_to_console("Don't have enable_crtc for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -864,8 +904,6 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
- dm_output_to_console("Don't have set_dce_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
@@ -1046,3 +1084,4 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_enable_lvtma_control(bp);
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index e317a3615147..91bc8a06e2cf 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -293,3 +293,107 @@ uint8_t dal_cmd_table_helper_encoder_id_to_atom(
return ENCODER_OBJECT_ID_NONE;
}
}
+
+uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index dfd30aaf4032..547700e119a6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -59,4 +59,12 @@ uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
uint8_t dal_cmd_table_helper_encoder_id_to_atom(
enum encoder_id id);
+
+uint8_t phy_id_to_atom(enum transmitter t);
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id);
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 73458e295103..268e2414b34f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -82,13 +82,13 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
case DCN_VERSION_4_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
default:
- /* Unsupported DCE */
- BREAK_TO_DEBUGGER();
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
return false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
index 11bf247bb180..3099128223df 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -94,32 +61,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -207,51 +148,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 755b6e33140a..349f0e5d5856 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -29,40 +29,9 @@
#include "include/bios_parser_types.h"
-#include "../command_table_helper2.h"
-
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
+#include "../command_table_helper.h"
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
+#include "../command_table_helper2.h"
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
@@ -91,32 +60,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +152,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
index 06b4f7fa4a50..1a5fefcde8af 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
@@ -91,32 +58,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +150,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
index 710221b4f5c5..01ccc803040c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
index 8b30b558cf1f..2ec5264536c7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d9955c5d2e5e..60021671b386 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
# DCN30
###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c3e58c730b1..15cf13ec5302 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
-#include "link.h"
+#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+ if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
display_count++;
}
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..6131ede2db7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
@@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -462,6 +463,9 @@ void dce_clk_mgr_construct(
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
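
The DCE update path drops the old "+15% when DFS bypass is inactive" workaround and instead caps the requested display clock at the PERFORMANCE state limit, which dce_clk_mgr_construct() now also publishes as max_supported_dispclk_khz. A small worked example of the new clamp (the numbers are illustrative only):

	/* DCE 8 PERFORMANCE state after this patch: 625 MHz dispclk. */
	const int max_disp_clk = 625000;                     /* kHz */
	int requested = 680000;                              /* kHz, from bw_ctx */
	int patched_disp_clk = MIN(max_disp_clk, requested); /* -> 625000 kHz */

	/* Previously the same request could become 680000 * 115 / 100 = 782000 kHz,
	 * well above what the state table allows. */
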
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index f8409453434c..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
+ struct dc *dc = context->clk_mgr->ctx->dc;
int j;
int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
for (j = 0; j < context->stream_count; j++) {
int k;
@@ -158,12 +164,29 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
+
+ /* Find first CRTC index and calculate its line time.
+ * This is necessary for DPM on SI GPUs.
+ */
+ if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index = cfg->pipe_idx;
+ pp_display_cfg->line_time_in_us =
+ timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+ }
+
+ if (!num_cfgs) {
+ pp_display_cfg->crtc_index = 0;
+ pp_display_cfg->line_time_in_us = 0;
}
pp_display_cfg->display_count = num_cfgs;
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
- }
-
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
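
dce110_fill_display_configs() now owns the shared defaults (disp_clk_khz, mclk switch time, crtc_index) and derives the first CRTC's line time, which DPM on SI parts consumes. A short worked example of the two derived values for a 1080p60 timing (the timing numbers are illustrative, not from the patch):

	/* Example, 1080p60: h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000
	 *   line_time_in_us = 2200 * 10000 / 1485000       = 14 us (integer math)
	 *   v_refresh       = 1485000 * 100 / 2200          = 67500
	 *   v_refresh       = (67500 + 1125 / 2) / 1125     = 60 Hz after rounding
	 */
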
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index 0267644717b2..69dd80d9f738 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- int dprefclk_wdivider;
- int dp_ref_clk_khz;
- int target_div;
+ struct dc_context *ctx = clk_mgr_base->ctx;
+ int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+ else
+ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -160,6 +147,8 @@ void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ struct clk_mgr *base = &clk_mgr->base;
+
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
@@ -170,5 +159,8 @@ void dce60_clk_mgr_construct(
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
+
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 19897fa52e7e..d82a52319088 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
return actual_dispclk_set_mhz * 1000;
}
-
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
index 083cb3158859..81d7c912549c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -27,6 +27,5 @@
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 23b390245b5d..5a633333dbb5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
return actual_dispclk_set_mhz * 1000;
}
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
-
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 1ce19d875358..f76fad87f0e1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -30,7 +30,6 @@ enum dcn_pwr_state;
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
index fa09c594fd36..06da34676965 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -56,6 +56,7 @@
#define DALSMC_MSG_SetDisplayRefreshFromMall 0xF
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
#define DALSMC_MSG_BacoAudioD3PME 0x11
-#define DALSMC_Message_Count 0x12
+#define DALSMC_MSG_SmartAccess 0x12
+#define DALSMC_Message_Count 0x13
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 8083a553c60e..ef77fcd164ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -30,6 +30,7 @@
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_present
+ .is_smu_present = dcn3_is_smu_present,
+ .set_smartmux_switch = dcn30m_set_smartmux_switch
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
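
dcn3_funcs now exposes the SmartMux request through the generic clk_mgr function table, so a display-manager-side caller does not need to know about the DCN3.0M SMU message underneath. A hedged usage sketch (the calling site and the pin value are assumptions for illustration):

	struct clk_mgr *clk_mgr = dc->clk_mgr;
	uint32_t pins_to_set = 1;	/* assumed: request the dGPU-facing position */
	uint32_t ack = 0;

	/* Ask PMFW to drive the SmartMux select pins; the return value is
	 * whatever the SMU reports back in the argument register. */
	if (clk_mgr->funcs->set_smartmux_switch)
		ack = clk_mgr->funcs->set_smartmux_switch(clk_mgr, pins_to_set);
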
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 3253115a153d..827bc2431d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
index 7ac87ef26aec..8e8a11c7437e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,19 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-#include <nvif/class.h>
+#include "clk_mgr_internal.h"
+#include "dcn30/dcn30m_clk_mgr.h"
+#include "dcn30m_clk_mgr_smu_msg.h"
-static const struct nvkm_engine_func
-ad102_ofa = {
- .sclass = {
- { -1, -1, NVC9FA_VIDEO_OFA },
- {}
- }
-};
-int
-ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- return -ENODEV;
+ return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
new file mode 100644
index 000000000000..757985b2eadc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30M_CLK_MGR_H__
+#define __DCN30M_CLK_MGR_H__
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
+
+#endif //__DCN30M_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..0dd0583ff21e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
+ unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ uint32_t result;
+ /* Wait for response register to be ready */
+ dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (IS_SMU_TIMEOUT(result))
+ dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
+
+ /* Wait for response */
+ if (result == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
+
+ dcn30m_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SmartAccess, pins_to_set, &response);
+
+ return response;
+}
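
The send/wait helper above polls the response register with a 10 us delay for up to 200000 retries, and the same figures make up the budget reported on timeout:

	/* Timeout budget handed to dm_helpers_smu_timeout():
	 *   delay_us * max_retries = 10 us * 200000 = 2,000,000 us = 2 s
	 * i.e. the SMU gets roughly two seconds to acknowledge a message
	 * before the driver handles it as a timeout. */
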
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
index ef474f61a1b5..8a59a473fc5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,17 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-#include <nvif/class.h>
+#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
-static const struct nvkm_engine_func
-ga100_ofa = {
- .sclass = {
- { -1, -1, NVC6FA_VIDEO_OFA },
- {}
- }
-};
+#include "core_types.h"
-int
-ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
+struct clk_mgr_internal;
- return -ENODEV;
-}
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
+#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
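
The Van Gogh bw_params table now records dispclk/dppclk alongside the DF-state clocks, using find_max_clk_value() over the enabled DPM levels. That helper is not part of this hunk; a minimal sketch of what a helper of this shape typically does, under the assumption that it just scans for the largest entry:

static unsigned int find_max_clk_value(const uint32_t clocks[], unsigned int num_clocks)
{
	unsigned int max_value = 0;
	unsigned int i;

	for (i = 0; i < num_clocks; i++)
		if (clocks[i] > max_value)
			max_value = clocks[i];

	return max_value;
}
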
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bc123f1884da..051052bd10c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 91d872d6d392..db687a13174d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn314_smu.h"
@@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK0_DFS_CNTL 0x0269
+#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DFS_CNTL 0x026c
+#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DFS_CNTL 0x026f
+#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DFS_CNTL 0x0272
+#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DFS_CNTL 0x0275
+#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DFS_CNTL 0x0278
+#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_CURRENT_CNT 0x02fb
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x02fc
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x02fd
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x02fe
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x02ff
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0300
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
+#define regCLK1_CLK0_BYPASS_CNTL 0x028a
+#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_BYPASS_CNTL 0x0293
+#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_BYPASS_CNTL 0x02a5
+#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_BYPASS_CNTL 0x02ae
+#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_BYPASS_CNTL 0x02b7
+#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_DS_CNTL 0x0283
+#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DS_CNTL 0x028c
+#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DS_CNTL 0x0295
+#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DS_CNTL 0x029e
+#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DS_CNTL 0x02a7
+#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DS_CNTL 0x02b0
+#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_ALLOW_DS 0x0284
+#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK1_ALLOW_DS 0x028d
+#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK2_ALLOW_DS 0x0296
+#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK3_ALLOW_DS 0x029f
+#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK4_ALLOW_DS 0x02a8
+#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK5_ALLOW_DS 0x02b1
+#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
@@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int);
+ struct clk_log_info log_info = {0};
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -200,6 +265,9 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+ dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ display_count = dcn314_get_active_display_cnt_wa(dc, context);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn314_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
@@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+		/* Raise the request to the debug minimum dispclk, if one is set, before handing it to PMFW. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+
+static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
- return;
+
+ struct dcn35_clk_internal internal = {0};
+
+ dcn314_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
}
static struct clk_bw_params dcn314_bw_params = {
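
dcn314_dump_clk_registers() is no longer a stub: it snapshots the CLK1 counters and bypass selects so dcn314_init_clocks() can seed clks.dispclk_khz from the boot state. A short note on the unit handling implied by the arithmetic (the 100 kHz step is an inference from the /10 and *1000, not stated in the patch):

	/* CLK1_CLKn_CURRENT_CNT appears to count in 100 kHz steps:
	 *   CURRENT_CNT = 6000  ->  6000 / 10 = 600   (MHz in the snapshot)
	 *   boot dispclk_khz    =  600 * 1000 = 600000 kHz
	 * BYPASS_CNTL is masked to bits [2:0]; select values above 4 are
	 * treated as "no bypass" and reported as 0. */
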
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 002c28e80720..0577eb527bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx,
void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info);
+
+
#endif //__DCN314_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index a0fb4481d2f1..3a881451e9da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -40,17 +40,51 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-
+#include "reg_helper.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "link.h"
+#include "link_service.h"
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 8
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
+
+#define regCLK1_CLK0_CURRENT_CNT 0x0314
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x0315
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x0316
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x0317
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x0318
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0319
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000
@@ -130,7 +164,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -194,8 +228,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -204,15 +236,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- /* No need to apply the w/a if we haven't taken over from bios yet */
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, true);
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
+		/* Raise the request to the debug minimum dispclk, if one is set, before handing it to PMFW. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -243,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+}
+
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ struct dcn35_clk_internal internal = {0};
+
+ dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
return;
}
@@ -592,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn315_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn315_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+void dcn315_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
+ struct clk_log_info log_info = {0};
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
+}
+
void dcn315_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn315 *clk_mgr,
@@ -659,6 +743,7 @@ void dcn315_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
+ clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
index ac36ddf5dd1a..642ae3d4a790 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
@@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn315_init_clocks(struct clk_mgr *clk_mgr);
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN315_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 2d14346b680e..478b4d6a3544 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+
+#define CTX clk_mgr->base.ctx
+#define IND_REG(offset) offset
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
@@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define FN(reg_name, field) \
FD(reg_name##__##field)
-#define REG_NBIO(reg_name) \
- (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define mmMP1_C2PMSG_3 0x3B1050C
+#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define reg__MP1_C2PMSG_3__SHIFT (0)
+
+
+#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define data_reg_name__MP1_C2PMSG_3__SHIFT (0)
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param(
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
- read_back_data = generic_read_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3);
+ IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0,
+ MP1_C2PMSG_3, msg_id);
+ IX_REG_GET_SYNC(mmMP1_C2PMSG_3,
+ MP1_C2PMSG_3, &read_back_data);
if (read_back_data == msg_id)
break;
udelay(2);
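The retry loop this hunk rewrites posts the message ID and reads it back to confirm the write landed before proceeding. A standalone sketch of that write-and-verify pattern is shown below; the register accessors and the fake register file are placeholders for the driver's IX_REG_SET_SYNC/IX_REG_GET_SYNC macros, and only the loop structure is taken from the diff.

/*
 * Write-and-verify retry sketch for posting an SMU message ID; reg_write()
 * and reg_read() stand in for the indirect register access macros.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMU_REGISTER_WRITE_RETRY_COUNT 4

static uint32_t fake_regs[16]; /* stand-in register file for the sketch */

static void reg_write(uint32_t offset, uint32_t value) { fake_regs[offset] = value; }
static uint32_t reg_read(uint32_t offset) { return fake_regs[offset]; }

static bool post_msg_id(uint32_t msg_reg, uint32_t msg_id)
{
	for (int i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
		reg_write(msg_reg, msg_id);      /* trigger the message transaction */
		if (reg_read(msg_reg) == msg_id) /* read back to confirm the write landed */
			return true;
		/* the driver waits ~2us here before retrying */
	}
	return false;
}

int main(void)
{
	printf("posted: %d\n", post_msg_id(3, 0x42));
	return 0;
}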
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index c3e50c3aaa60..1769b1f26e75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the clock requested from PMFW to the driver's minimum dispclk limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..7da7b41bd092 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index cf2d35363e8b..5d80fdf63ffc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
index 6a6ae618650b..4607eff07253 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -65,6 +65,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 2a74140d7ebf..dfd0c9505af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER
@@ -89,7 +89,8 @@
#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
-#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK5_spll_field_8 0x1B24B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
@@ -116,6 +117,7 @@
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#undef FN
@@ -201,34 +203,41 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
+ struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc;
+ struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc;
bool stream_changed_otg_dig_on = false;
+ bool has_active_hpo = false;
+
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
+
+ if (!dc->config.unify_link_enc_assignment) {
+ if (new_pipe->stream)
+ new_pipe_link_enc = new_pipe->stream->link_enc;
+ if (pipe->stream)
+ pipe_link_enc = pipe->stream->link_enc;
+ }
+
stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
old_pipe->stream != new_pipe->stream &&
old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
- new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled(
- new_pipe->stream->link_enc) &&
+ new_pipe_link_enc && !new_pipe->stream->dpms_off &&
+ new_pipe_link_enc->funcs->is_dig_enabled &&
+ new_pipe_link_enc->funcs->is_dig_enabled(
+ new_pipe_link_enc) &&
new_pipe->stream_res.stream_enc &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
- bool has_active_hpo = false;
-
if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) {
has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) &&
dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
- }
-
-
- if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
- (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
-
+ }
+ if (!has_active_hpo && !stream_changed_otg_dig_on && pipe->stream &&
+ (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || !pipe_link_enc) &&
+ !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe)) {
/* This w/a should not trigger when we have a dig active */
if (disable) {
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
@@ -385,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -401,6 +412,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
dcn35_smu_set_dtbclk(clk_mgr, false);
+
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -418,11 +430,17 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, true);
- clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ int actual_dtbclk = 0;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
- clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+
+ actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ if (actual_dtbclk > 590000) {
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
}
/* check that we're not already in D0 */
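The reordered DTBCLK enable above programs the DTO, asks SMU to turn the clock on, and only records the new state once the hardware counter confirms the clock is really running. The sketch below illustrates that verify-before-commit step; the register read is stubbed, and the ~590 MHz threshold is the one used in the hunk.

/*
 * Sketch of the DTBCLK enable sequence rearranged above: request the clock,
 * read the counter back, and commit the state only if the clock came up.
 */
#include <stdbool.h>
#include <stdio.h>

struct dtb_state {
	bool dtbclk_en;
	int ref_dtbclk_khz;
};

/* Stand-in for REG_READ(CLK1_CLK4_CURRENT_CNT). */
static int read_dtbclk_cnt(void) { return 600000; }

static void enable_dtbclk(struct dtb_state *s, int requested_ref_khz)
{
	/* program the DTO and request the clock from SMU (stubbed out here) */
	int actual_cnt = read_dtbclk_cnt();

	if (actual_cnt > 590000) { /* clock verified up: commit the state */
		s->ref_dtbclk_khz = requested_ref_khz;
		s->dtbclk_en = true;
	}
}

int main(void)
{
	struct dtb_state s = { 0 };

	enable_dtbclk(&s, 600000);
	printf("dtbclk_en=%d ref=%d kHz\n", s.dtbclk_en, s.ref_dtbclk_khz);
	return 0;
}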
@@ -460,14 +478,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
- if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
- new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+ /* Clamp the clock requested from PMFW to the driver's minimum dispclk limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -566,9 +589,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr_dcn35 *clk_mgr)
{
+ struct dcn35_clk_internal internal = {0};
+ char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+
+ dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+
+ DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+
+ DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+
+ DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+
+ // REGISTER VALUES
+ DC_LOG_SMU("reg_name,value,clk_type");
+
+ DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk",
+ internal.CLK1_CLK3_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk",
+ internal.CLK1_CLK4_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider",
+ internal.CLK1_CLK3_DS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow",
+ internal.CLK1_CLK3_ALLOW_DS);
+
+ DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk",
+ internal.CLK1_CLK2_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk",
+ internal.CLK1_CLK0_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk",
+ internal.CLK1_CLK1_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+
+ }
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
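The save routine above decodes the bypass source from the low three bits of each *_BYPASS_CNTL register and treats out-of-range encodings as DFS. A small standalone sketch of that decode, using the same name table as the hunk:

/*
 * Sketch of the bypass-source decode used in dcn35_save_clk_registers():
 * the low three bits select the source, and values above 4 fall back to
 * index 0 ("DFS").
 */
#include <stdint.h>
#include <stdio.h>

static const char *const bypass_clks[5] = {
	"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"
};

static unsigned int decode_bypass(uint32_t bypass_cntl)
{
	unsigned int sel = bypass_cntl & 0x7;

	return sel > 4 ? 0 : sel; /* clamp unknown encodings to DFS */
}

int main(void)
{
	printf("bypass source: %s\n", bypass_clks[decode_bypass(0x3)]);
	return 0;
}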
@@ -577,19 +709,21 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
uint32_t ssc_enable;
- ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) {
+ ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK;
+ } else {
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ }
return ssc_enable != 0;
}
static void init_clk_states(struct clk_mgr *clk_mgr)
{
- struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
- clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -600,6 +734,8 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int);
+
init_clk_states(clk_mgr);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
@@ -609,6 +745,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr)
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+ dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35);
+
+ clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10;
+ if (clk_mgr->boot_snapshot.dtbclk > 59000) {
+ /* dtbclk is considered enabled when the boot counter exceeds ~590 MHz */
+ clk_mgr->clks.dtbclk_en = true;
+ }
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
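The init path above infers from the boot snapshot whether firmware left DTBCLK running. The sketch below captures that inference in isolation; the scale factor and the 59000 threshold (which corresponds to roughly 590 MHz) are taken from the hunk, and the snapshot struct is a simplified stand-in.

/*
 * Sketch of the boot-time DTBCLK inference added in dcn35_init_clocks(),
 * assuming boot_snapshot.dtbclk carries the value derived from
 * CLK1_CLK4_CURRENT_CNT by the save routine.
 */
#include <stdbool.h>
#include <stdio.h>

struct boot_snapshot {
	unsigned int dtbclk; /* as stored by dcn35_save_clk_registers() */
};

static void init_dtbclk_state(const struct boot_snapshot *snap,
			      int *ref_dtbclk_khz, bool *dtbclk_en)
{
	*ref_dtbclk_khz = snap->dtbclk * 10;

	/* Treat anything above the ~590 MHz threshold as "already enabled". */
	*dtbclk_en = snap->dtbclk > 59000;
}

int main(void)
{
	struct boot_snapshot snap = { .dtbclk = 60000 };
	int ref_khz;
	bool en;

	init_dtbclk_state(&snap, &ref_khz, &en);
	printf("ref_dtbclk=%d kHz, enabled=%d\n", ref_khz, en);
	return 0;
}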
@@ -1152,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}
+static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn35_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1163,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
+ .get_max_clock_khz = dcn35_get_max_clock_khz,
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
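The new get_max_clock_khz callback resolves the highest reported DPM level to kHz and falls back to the boot snapshot when the clock table is empty; the DSC case assumes DSCCLK runs at one third of DISPCLK, matching the /3 in the hunk. A trimmed-down sketch follows, with the clock table reduced to the fields the lookup touches and the fallback expressed in kHz for consistency within the sketch.

/*
 * Simplified sketch of dcn35_get_max_clock_khz() for the DISPCLK case:
 * return the top DPM entry in kHz, or the boot value when no levels exist.
 */
#include <stdio.h>

struct clk_table {
	unsigned int num_dispclk_levels;
	unsigned int dispclk_mhz[8]; /* DPM entries, lowest to highest */
};

static unsigned int max_dispclk_khz(const struct clk_table *t,
				    unsigned int boot_dispclk_khz)
{
	unsigned int n = t->num_dispclk_levels;

	return n ? t->dispclk_mhz[n - 1] * 1000 : boot_dispclk_khz;
}

int main(void)
{
	struct clk_table t = { .num_dispclk_levels = 2, .dispclk_mhz = { 600, 1200 } };

	printf("max dispclk = %u kHz\n", max_dispclk_khz(&t, 600000));
	return 0;
}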
@@ -1243,7 +1416,7 @@ void dcn35_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
- if (ctx->dce_version == DCN_VERSION_3_5) {
+ if (ctx->dce_version != DCN_VERSION_3_51) {
clk_mgr->base.regs = &clk_mgr_regs_dcn35;
clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
@@ -1299,7 +1472,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+ dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
@@ -1395,7 +1568,7 @@ void dcn35_clk_mgr_construct(
/* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */
if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE &&
- ctx->dce_version == DCN_VERSION_3_5 &&
+ ctx->dce_version != DCN_VERSION_3_51 &&
((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00))
ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index f6f0e6a33001..604d256cb47a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -84,8 +84,8 @@
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
-#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
-#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU
+#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
@@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_DispPsrExit,
+ VBIOSSMC_MSG_DispIPS2Exit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
index dbfdd3487da5..2e0d34fd7512 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
@@ -43,7 +43,9 @@
#define DALSMC_MSG_ActiveUclkFclk 0x18
#define DALSMC_MSG_IdleUclkFclk 0x19
#define DALSMC_MSG_SetUclkPstateAllow 0x1A
-#define DALSMC_Message_Count 0x1B
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#define DALSMC_Message_Count 0x1D
typedef enum {
FCLK_SWITCH_DISALLOW,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 5b4e1e8a9ae2..306016c1f109 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
@@ -22,7 +22,7 @@
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
-#include "dml/dcn401/dcn401_fpu.h"
+#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
#define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E69
@@ -162,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
unsigned int i;
char *entry_i = (char *)entry_0;
- uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+ uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
@@ -174,50 +174,43 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) {
- *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
{
- /* legacy */
- DC_FP_START();
- dcn401_build_wm_range_table_fpu(clk_mgr);
- DC_FP_END();
-
- if (clk_mgr->ctx->dc->debug.using_dml21) {
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
-
- /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
- } else {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
- }
-
- /* Set 1B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
+ /* For min clocks use as reported by PM FW and report those as min */
+ uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
+ uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+
+ /* Set A - Normal - default values */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
+
+ /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
+ /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
+ if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
+ } else {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
}
+
+ /* Set 1B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
}
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -238,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
- dcn30_smu_check_driver_if_version(clk_mgr);
- dcn30_smu_check_msg_header_version(clk_mgr);
+ dcn401_smu_check_driver_if_version(clk_mgr);
+ dcn401_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0;
@@ -260,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0;
@@ -270,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0;
@@ -280,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0;
@@ -318,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->smu_present && clk_mgr->dpm_present &&
+ ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz));
+}
+
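dcn401_is_dc_mode_present() reports DC mode as available when the SMU and DPM are up and at least one clock domain has both a non-empty DPM table and a non-zero DC-mode limit. The sketch below restates that test over a simplified array of domains; the structures are stand-ins, not the driver's bw_params layout.

/*
 * Sketch of the "DC mode present" test added above: true when any clock
 * domain reports both DPM levels and a DC-mode limit.
 */
#include <stdbool.h>
#include <stdio.h>

struct clk_domain {
	unsigned int num_levels;   /* DPM entries reported by firmware */
	unsigned int dc_limit_mhz; /* DC-mode cap, 0 if none */
};

static bool is_dc_mode_present(const struct clk_domain *domains, int count,
			       bool smu_present, bool dpm_present)
{
	if (!smu_present || !dpm_present)
		return false;

	for (int i = 0; i < count; i++)
		if (domains[i].num_levels && domains[i].dc_limit_mhz)
			return true;

	return false;
}

int main(void)
{
	struct clk_domain d[2] = { { 4, 0 }, { 3, 1800 } };

	printf("dc mode present: %d\n", is_dc_mode_present(d, 2, true, true));
	return 0;
}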
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
@@ -628,207 +640,6 @@ static void dcn401_update_clocks_update_dentist(
}
-static void dcn401_update_clocks_legacy(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
- struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
- bool update_dppclk = false;
- bool update_dispclk = false;
- bool enter_display_off = false;
- bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- bool force_reset = false;
- bool update_uclk = false, update_fclk = false;
- bool p_state_change_support;
- bool fclk_p_state_change_support;
- int total_plane_count;
-
- if (dc->work_arounds.skip_clock_update)
- return;
-
- if (clk_mgr_base->clks.dispclk_khz == 0 ||
- (dc->debug.force_clock_mode & 0x1)) {
- /* This is from resume or boot up, if forced_clock cfg option used,
- * we bypass program dispclk and DPPCLK, but need set them for S3.
- */
- force_reset = true;
-
- dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
-
- /* Force_clock_mode 0x1: force reset the clock even it is the same clock
- * as long as it is in Passive level.
- */
- }
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
-
- if (clk_mgr->smu_present) {
- if (enter_display_off == safe_to_lower)
- dcn401_smu_set_num_of_displays(clk_mgr, display_count);
-
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
-
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
-
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
-
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, true);
- }
- }
-
- if (dc->debug.force_min_dcfclk_mhz > 0)
- new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
- new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
- clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
- clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
- /* We don't actually care about socclk, don't notify SMU of hard min */
- clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
-
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
-
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = p_state_change_support;
- clk_mgr_base->clks.fw_based_mclk_switching = p_state_change_support && new_clocks->fw_based_mclk_switching;
-
- /* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
- if (!clk_mgr_base->clks.fclk_p_state_change_support &&
- update_fclk &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_FCLK)) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, false);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported */
- if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
- clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
- update_uclk = true;
- }
-
- /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
- if (clk_mgr_base->clks.p_state_change_support &&
- (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
- if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
- dpp_clock_lowered = true;
-
- clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
- clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz;
-
- if (clk_mgr->smu_present && !dpp_clock_lowered && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK))
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK, clk_mgr_base->clks.dppclk_khz);
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK))
- clk_mgr_base->clks.actual_dispclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz);
-
- update_dispclk = true;
- }
-
- if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- }
-
- /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
- if (!dc->debug.disable_dtb_ref_clk_switch &&
- should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- /* DCCG requires KHz precision for DTBCLK */
- clk_mgr_base->clks.ref_dtbclk_khz =
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
-
- dcn401_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
- }
-
- if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
- if (dpp_clock_lowered) {
- /* if clock is being lowered, increase DTO before lowering refclk */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK)) {
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK,
- clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower,
- clk_mgr_base->clks.actual_dppclk_khz);
- }
-
- } else {
- /* if clock is being raised, increase refclk before lowering DTO */
- if (update_dppclk || update_dispclk)
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
- * that we do not lower dto when it is not safe to lower. We do not need to
- * compare the current and new dppclk before calling this function.
- */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.actual_dppclk_khz);
- }
- }
-
- if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
- /*update dmcu for wait_loop count*/
- dmcu->funcs->set_psr_wait_loop(dmcu,
- clk_mgr_base->clks.dispclk_khz / 1000 / 7);
-}
-
static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned int num_steps)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -1008,15 +819,15 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
update_active_fclk = true;
update_idle_fclk = true;
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = true;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
- }
+ /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW (message not supported on DCN401) */
+ // if (clk_mgr_base->clks.fclk_p_state_change_support) {
+ // /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ // if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = true;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
+ // }
}
if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
@@ -1224,14 +1035,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
// (*num_steps)++;
// }
- /* disable FCLK P-State support if needed */
- if (!fclk_p_state_change_support &&
- should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = false;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
+ /* disable FCLK P-State support if needed (message not supported on DCN401) */
+ // if (!fclk_p_state_change_support &&
+ // should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
+ // dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = false;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
}
if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching &&
@@ -1412,11 +1223,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->debug.enable_legacy_clock_update) {
- dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
- return;
- }
-
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
@@ -1512,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
- dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
- dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
+ dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+ dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -1551,6 +1357,20 @@ static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool curren
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
+static int dcn401_get_hard_min_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+}
+
+static int dcn401_get_hard_min_fclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz;
+}
+
/* Get current memclk states, update bounding box */
static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -1570,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
}
- clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0;
@@ -1579,22 +1399,27 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
clk_mgr->dpm_present = false;
+ clk_mgr_base->bw_params->num_channels = dcn401_smu_get_num_of_umc_channels(clk_mgr);
+ if (clk_mgr_base->ctx->dc_bios) {
+ /* use BIOS values if none provided by PMFW */
+ if (clk_mgr_base->bw_params->num_channels == 0) {
+ clk_mgr_base->bw_params->num_channels = clk_mgr_base->ctx->dc_bios->vram_info.num_chans;
+ }
+ clk_mgr_base->bw_params->dram_channel_width_bytes = clk_mgr_base->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ }
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
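The memory-channel handling added above prefers the count returned by the new GetNumUmcChannels query and falls back to the VBIOS vram_info value when the firmware reports zero. A short sketch of that fallback, with simplified stand-in types:

/*
 * Sketch of the memory-channel fallback: use the PMFW-reported count when
 * non-zero, otherwise take the VBIOS value.
 */
#include <stdio.h>

struct vram_info {
	unsigned int num_chans;
	unsigned int dram_channel_width_bytes;
};

static unsigned int resolve_num_channels(unsigned int pmfw_channels,
					 const struct vram_info *bios)
{
	if (pmfw_channels)
		return pmfw_channels;
	return bios ? bios->num_chans : 0; /* VBIOS fallback */
}

int main(void)
{
	struct vram_info bios = { .num_chans = 8, .dram_channel_width_bytes = 2 };

	printf("channels = %u\n", resolve_num_channels(0, &bios));
	return 0;
}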
@@ -1671,6 +1496,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1684,6 +1538,10 @@ static struct clk_mgr_funcs dcn401_funcs = {
.enable_pme_wa = dcn401_enable_pme_wa,
.is_smu_present = dcn401_is_smu_present,
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
+ .get_hard_min_memclk = dcn401_get_hard_min_memclk,
+ .get_hard_min_fclk = dcn401_get_hard_min_fclk,
+ .is_dc_mode_present = dcn401_is_dc_mode_present,
+ .get_max_clock_khz = dcn401_get_max_clock_khz,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
@@ -1744,7 +1602,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
- kfree(clk_mgr);
+ kfree(clk_mgr401);
return NULL;
}
@@ -1755,6 +1613,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr401);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 6c9ae5ca2c7e..97a1ce1e8a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -105,10 +105,13 @@ struct dcn401_clk_mgr {
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base);
struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
struct dccg *dccg);
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
#endif /* __DCN401_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index b02a41179b41..3a263840893e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -25,6 +25,9 @@
#ifndef DALSMC_MSG_SubvpUclkFclk
#define DALSMC_MSG_SubvpUclkFclk 0x1B
#endif
+#ifndef DALSMC_MSG_GetNumUmcChannels
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#endif
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
@@ -54,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
/* Wait for response register to be ready */
dcn401_smu_wait_for_response(clk_mgr, 10, 200000);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -68,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
return false;
}
@@ -99,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
-
return reg;
}
@@ -112,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Wait for response register to be ready */
dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -121,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
- TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
-
/* Wait for response */
if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
*total_delay_us = delay1_us + delay2_us;
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
*total_delay_us = delay1_us + 2000000;
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
+ return false;
+}
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ smu_print("SMU Get SMU version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version)) {
+
+ smu_print("SMU version: %d\n", *version);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match SMU14_DRIVER_IF_VERSION */
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check driver if version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
+
+ smu_print("SMU driver if version: %d\n", response);
+
+ if (response == SMU14_DRIVER_IF_VERSION)
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check msg header version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
+
+ smu_print("SMU msg header version: %d\n", response);
+
+ if (response == DALSMC_VERSION)
+ return true;
+ }
+
return false;
}
@@ -160,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
smu_print("Numways for SubVP : %d\n", num_ways);
}
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ smu_print("SMU Set DRAM addr high: %d\n", addr_high);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ smu_print("SMU Set DRAM addr low: %d\n", addr_low);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
@@ -334,3 +410,63 @@ void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t n
dcn401_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
+
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr)
+{
+ unsigned int response = 0;
+
+ dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_GetNumUmcChannels, 0, &response);
+
+ smu_print("SMU Get Num UMC Channels: num_umc_channels = %d\n", response);
+
+ return response;
+}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features; the return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ smu_print("SMU dpm freq: %d MHz\n", response);
+
+ return response;
+}
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ smu_print("SMU DC mode max DMP freq: %d MHz\n", response);
+
+ return response;
+}
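
/*
 * Illustrative sketch, not part of this patch: one way a caller could use the
 * dpm_level = 0xFF feature query documented above. Only functions added by
 * this patch are called; the helper name itself is hypothetical.
 */
static void example_dump_dpm_levels(struct clk_mgr_internal *clk_mgr, uint32_t clk)
{
	unsigned int query = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
	unsigned int num_levels = query & 0xFF;		/* bits 7:0 */
	bool fine_grained = (query >> 31) & 0x1;	/* bit 31 */
	unsigned int i;

	if (query == 0)
		return;	/* query failed */

	/* with fine-grained DPM only min and max levels are reported */
	if (fine_grained)
		num_levels = 2;

	for (i = 0; i < num_levels; i++) {
		unsigned int freq_mhz =
			dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF;

		smu_print("clk %d, DPM level %d: %d MHz\n", clk, i, freq_mhz);
	}
}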
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index 42cf7885a7cb..4f5ac603e822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -7,11 +7,17 @@
#include "os_types.h"
#include "core_types.h"
-#include "dcn32/dcn32_clk_mgr_smu_msg.h"
+struct clk_mgr_internal;
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
@@ -28,5 +34,8 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t fclk_freq_mhz);
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dfa36368ae63..8be9cbd43e18 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,7 +36,9 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane.h"
#include "dc_plane_priv.h"
+#include "dc_stream_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -58,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -81,7 +83,8 @@
#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -145,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
/* Private functions */
-static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+static inline void elevate_update_type(
+ struct surface_update_descriptor *descriptor,
+ enum surface_update_type new_type,
+ enum dc_lock_descriptor new_locks
+)
{
- if (new > *original)
- *original = new;
+ if (new_type > descriptor->update_type)
+ descriptor->update_type = new_type;
+
+ descriptor->lock_descriptor |= new_locks;
}
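
/*
 * Illustrative sketch, not part of this patch: with the reworked helper a
 * caller accumulates both the update type and the locks it requires in one
 * descriptor instead of tracking the type alone. The function name below is
 * hypothetical; the types and enums are the ones used by this patch.
 */
static struct surface_update_descriptor example_combine_updates(void)
{
	struct surface_update_descriptor desc = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	elevate_update_type(&desc, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	elevate_update_type(&desc, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);

	/* desc now carries UPDATE_TYPE_FULL and both lock bits ORed together */
	return desc;
}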
static void destroy_links(struct dc *dc)
@@ -215,11 +224,24 @@ static bool create_links(
connectors_num,
num_virtual_links);
- // condition loop on link_count to allow skipping invalid indices
+ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+ * but it doesn't say which indices are valid, and not every index has an actual connector.
+ * So, if we don't find a connector on an index, that is not an error.
+ *
+ * - There is no guarantee that the first N indices will be valid
+ * - VBIOS may report a higher number of valid indices than there are actual connectors
+ * - Some VBIOS have valid configurations for more connectors than there actually are
+ * on the card. This may be because the manufacturer used the same VBIOS for different
+ * variants of the same card.
+ */
for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+ continue;
+
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
link_init_params.ctx = dc->ctx;
@@ -239,6 +261,7 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
/* Create a link for each usb4 dpia port */
+ dc->lowest_dpia_link_index = MAX_LINKS;
for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -251,6 +274,9 @@ static bool create_links(
link = dc->link_srv->create_link(&link_init_params);
if (link) {
+ if (dc->lowest_dpia_link_index > dc->link_count)
+ dc->lowest_dpia_link_index = dc->link_count;
+
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -276,6 +302,8 @@ static bool create_links(
link->link_id.type = OBJECT_TYPE_CONNECTOR;
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -438,9 +466,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
- if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->ctx->dce_version > DCE_VERSION_MAX) {
+ if (dc->optimized_required &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
+ stream->adjust.timing_adjust_pending = true;
return false;
+ }
+ }
dc_exit_ips_for_hw_access(dc);
@@ -452,6 +485,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->caps.max_v_total != 0 &&
(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ stream->adjust.timing_adjust_pending = false;
if (adjust->allow_otg_v_count_halt)
return set_long_vtotal(dc, stream, adjust);
else
@@ -465,10 +499,15 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
dc->hwss.set_drr(&pipe,
1,
*adjust);
+ stream->adjust.timing_adjust_pending = false;
+
+ if (dc->hwss.notify_cursor_offload_drr_update)
+ dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
return true;
}
}
+
return false;
}
@@ -515,33 +554,6 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
return status;
}
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **streams, int num_streams,
- unsigned int *v_pos, unsigned int *nom_v_pos)
-{
- /* TODO: Support multiple streams */
- const struct dc_stream_state *stream = streams[0];
- int i;
- bool ret = false;
- struct crtc_position position;
-
- dc_exit_ips_for_hw_access(dc);
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (pipe->stream == stream && pipe->stream_res.stream_enc) {
- dc->hwss.get_position(&pipe, 1, &position);
-
- *v_pos = position.vertical_count;
- *nom_v_pos = position.nominal_vcount;
- ret = true;
- }
- }
- return ret;
-}
-
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
@@ -615,6 +627,68 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
return true;
}
+
+static void
+dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
+{
+ int i;
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;
+
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+
+ if (stop) {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
+ }
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+bool
+dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window, uint8_t phy_id, bool stop)
+{
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = phy_id;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmub_srv = dc->ctx->dmub_srv;
+
+ /* forward to dmub only. no dmcu support */
+ if (dmub_srv)
+ dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
+ else
+ return false;
+
+ return true;
+}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
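
/*
 * Illustrative sketch, not part of this patch (and only meaningful under
 * CONFIG_DRM_AMD_SECURE_DISPLAY): driving the new multi-window path with two
 * enabled regions of interest. The helper name and window geometry are made
 * up; only fields referenced by this patch are touched.
 */
static bool example_forward_two_crc_windows(struct dc_stream_state *stream, uint8_t phy_id)
{
	struct crc_window windows[MAX_CRC_WINDOW_NUM] = {0};

	windows[0].rect.x = 0;
	windows[0].rect.y = 0;
	windows[0].rect.width = 256;
	windows[0].rect.height = 256;
	windows[0].enable = true;

	windows[1].rect.x = 512;
	windows[1].rect.y = 512;
	windows[1].rect.width = 128;
	windows[1].rect.height = 128;
	windows[1].enable = true;

	/* stop = false: notify DMUB of the updated windows */
	return dc_stream_forward_multiple_crc_window(stream, windows, phy_id, false);
}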
/**
@@ -625,15 +699,17 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
* @enable: Enable CRC if true, disable otherwise.
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
+ * @idx: Index of the CRC engine instance to capture the CRC on.
+ * @reset: Reset the CRC engine before applying the configuration.
*
- * By default, only CRC0 is configured, and the entire frame is used to
- * calculate the CRC.
+ * By default, the entire frame is used to calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
- struct crc_params *crc_window, bool enable, bool continuous)
+ struct crc_params *crc_window, bool enable, bool continuous,
+ uint8_t idx, bool reset)
{
struct pipe_ctx *pipe;
struct crc_params param;
@@ -677,6 +753,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.continuous_mode = continuous;
param.enable = enable;
+ param.crc_eng_inst = idx;
+ param.reset = reset;
+
tg = pipe->stream_res.tg;
/* Only call if supported */
@@ -691,6 +770,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
*
* @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
+ * @idx: Index of the CRC engine to get the CRC from.
* @r_cr: CRC value for the red component.
* @g_y: CRC value for the green component.
* @b_cb: CRC value for the blue component.
@@ -700,7 +780,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* Return:
* %false if stream is not found, or if CRCs are not enabled.
*/
-bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
int i;
@@ -721,7 +801,7 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
tg = pipe->stream_res.tg;
if (tg->funcs->get_crc)
- return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
DC_LOG_WARNING("CRC capture not supported.");
return false;
}
@@ -863,7 +943,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
// reset link encoder assignment table on destruct
- if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
+ !dc->config.unify_link_enc_assignment)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
@@ -881,21 +962,24 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
-
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -923,6 +1007,8 @@ static bool dc_construct_ctx(struct dc *dc,
if (!dc_ctx)
return false;
+ dc_stream_init_rmcm_3dlut(dc);
+
dc_ctx->cgs_device = init_params->cgs_device;
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
@@ -1069,8 +1155,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
- if (dc->caps.max_optimizable_video_width == 0)
- dc->caps.max_optimizable_video_width = 5120;
+ if (dc->check_config.max_optimizable_video_width == 0)
+ dc->check_config.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -1082,6 +1168,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -1148,6 +1237,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
+ memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
+ sizeof(pipe_ctx->visual_confirm_color));
+ return;
+ }
+
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1159,6 +1254,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
+ get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
else {
if (dc->ctx->dce_version < DCN_VERSION_2_0)
color_space_to_black_color(
@@ -1173,6 +1270,53 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
+ get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ }
+ }
+}
+
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color)
+{
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ struct dc_plane_state *plane_state = NULL;
+
+ if (!stream_status)
+ return;
+
+ switch (dc->debug.visual_confirm) {
+ case VISUAL_CONFIRM_DISABLE:
+ return;
+ case VISUAL_CONFIRM_PSR:
+ case VISUAL_CONFIRM_FAMS:
+ pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
+ if (!pipe_ctx)
+ return;
+ dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
+ memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
+ return;
+
+ default:
+ /* find plane with highest layer_index */
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i]->visible)
+ plane_state = stream_status->plane_states[i];
+ }
+ if (!plane_state)
+ return;
+ /* find pipe that contains plane with highest layer index */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state == plane_state) {
+ memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
+ return;
+ }
}
}
}
@@ -1650,17 +1794,23 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (dc->debug.force_odm_combine)
+ if (dc->debug.force_odm_combine) {
+ DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n");
return false;
+ }
/* Check for enabled DIG to identify enabled display */
- if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n");
return false;
+ }
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- if (enc_inst == ENGINE_ID_UNKNOWN)
+ if (enc_inst == ENGINE_ID_UNKNOWN) {
+ DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n");
return false;
+ }
for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
if (dc->res_pool->stream_enc[i]->id == enc_inst) {
@@ -1674,62 +1824,98 @@ bool dc_validate_boot_timing(const struct dc *dc,
}
// tg_inst not found
- if (i == dc->res_pool->stream_enc_count)
+ if (i == dc->res_pool->stream_enc_count) {
+ DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n");
return false;
+ }
- if (tg_inst >= dc->res_pool->timing_generator_count)
+ if (tg_inst >= dc->res_pool->timing_generator_count) {
+ DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n");
return false;
+ }
- if (tg_inst != link->link_enc->preferred_engine)
+ if (tg_inst != link->link_enc->preferred_engine) {
+ DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n");
return false;
+ }
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->get_hw_timing)
+ if (!tg->funcs->get_hw_timing) {
+ DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n");
return false;
+ }
- if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
+ DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n");
return false;
+ }
- if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ if (crtc_timing->h_total != hw_crtc_timing.h_total) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n");
return false;
+ }
- if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n");
return false;
+ }
- if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n");
return false;
+ }
- if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n");
return false;
+ }
- if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n");
return false;
+ }
- if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n");
return false;
+ }
- if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ if (crtc_timing->v_total != hw_crtc_timing.v_total) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n");
return false;
+ }
- if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n");
return false;
+ }
- if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n");
return false;
+ }
- if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n");
return false;
+ }
- if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n");
return false;
+ }
- if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n");
return false;
+ }
/* block DSC for now, as VBIOS does not currently support DSC timings */
- if (crtc_timing->flags.DSC)
+ if (crtc_timing->flags.DSC) {
+ DC_LOG_DEBUG("boot timing validation failed due to DSC\n");
return false;
+ }
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz = 0;
@@ -1751,39 +1937,55 @@ bool dc_validate_boot_timing(const struct dc *dc,
} else if (se && se->funcs->get_pixels_per_cycle) {
uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);
- if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n");
return false;
+ }
pix_clk_100hz *= pixels_per_cycle;
}
// Note: In rare cases, HW pixclk may differ from crtc's pixclk
// slightly due to rounding issues in 10 kHz units.
- if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
+ if (crtc_timing->pix_clk_100hz != pix_clk_100hz) {
+ DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n");
return false;
+ }
- if (!se || !se->funcs->dp_get_pixel_format)
+ if (!se || !se->funcs->dp_get_pixel_format) {
+ DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n");
return false;
+ }
if (!se->funcs->dp_get_pixel_format(
se,
&hw_crtc_timing.pixel_encoding,
- &hw_crtc_timing.display_color_depth))
+ &hw_crtc_timing.display_color_depth)) {
+ DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n");
return false;
+ }
- if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) {
+ DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n");
return false;
+ }
- if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) {
+ DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n");
return false;
+ }
}
+
if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n");
return false;
}
- if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n");
return false;
+ }
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
@@ -1945,6 +2147,26 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb)) {
disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
+ } else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
+ /* If the previous Stream still retains the apply seamless boot flag,
+ * it means the OS has not actually performed a flip yet.
+ * At this point, if we receive dc_commit_streams again, we should
+ * once more check whether the actual HW timing matches what the OS
+ * has provided
+ */
+ disable_vbios_mode_if_required(dc, context);
+ }
+
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ // Only delay otg master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ // dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
}
if (context->stream_count > get_seamless_boot_stream_count(context) ||
@@ -1956,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -1986,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);
+
if (result != DC_OK) {
/* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
@@ -2011,6 +2239,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
@@ -2019,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2063,7 +2299,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);
@@ -2152,11 +2388,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ struct dc_sink *sink = stream->sink;
/* revalidate streams */
- res = dc_validate_stream(dc, stream);
- if (res != DC_OK)
- return res;
+ if (!dc_is_virtual_signal(sink->sink_signal)) {
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+ }
+
dc_stream_log(dc, stream);
@@ -2191,12 +2431,12 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
- res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
*/
- if (res == DC_OK && dc->res_pool->funcs->link_encs_assign)
+ if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment)
dc->res_pool->funcs->link_encs_assign(
dc, context, context->streams, context->stream_count);
@@ -2205,6 +2445,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+ * If not already seamless, make transition seamless by inserting intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -2351,7 +2603,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2420,47 +2671,50 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
+static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `plane_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
@@ -2472,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* recalculate stutter period.
*/
update_flags->bits.dcc_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@@ -2481,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
+ const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
- if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(union dc_tiling_info)) != 0) {
+ if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
-
- /* todo: below are HW dependent, we should add a hook to
- * DCE/N resource and validated there.
- */
- if (!dc->debug.skip_full_updated_if_possible) {
- /* swizzled mode requires RQ to be setup properly,
- * thus need to run DML to calculate RQ settings
- */
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+
+ switch (tiling->gfxversion) {
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
+ if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxAddr3:
+ if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ case DcGfxVersionUnknown:
+ default:
+ break;
}
}
@@ -2512,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
return update_type;
}
-static enum surface_update_type get_scaling_info_update_type(
- const struct dc *dc,
+static struct surface_update_descriptor get_scaling_info_update_type(
+ const struct dc_check_config *check_config,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `scaling_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
@@ -2530,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
@@ -2543,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type(
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
@@ -2555,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
- || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) {
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
update_flags->bits.position_change = 1;
+ }
- /* process every update flag before returning */
- if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.scaling_change)
- return UPDATE_TYPE_FULL;
-
- if (update_flags->bits.position_change)
- return UPDATE_TYPE_MED;
-
- return UPDATE_TYPE_FAST;
+ return update_type;
}
-static enum surface_update_type det_surface_update(const struct dc *dc,
- const struct dc_surface_update *u)
+static struct surface_update_descriptor det_surface_update(
+ const struct dc_check_config *check_config,
+ struct dc_surface_update *u)
{
- const struct dc_state *context = dc->current_state;
- enum surface_update_type type;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ if (u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ return overall_type;
}
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
+
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
- type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ inner_type = get_scaling_info_update_type(check_config, u);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
- if (u->in_transfer_func)
+ if (u->in_transfer_func) {
update_flags->bits.in_transfer_func_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->input_csc_color_matrix)
+ if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->coeff_reduction_factor)
+ if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->gamut_remap_matrix)
+ if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->blend_tf)
+ if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
-
- if (u->gamma) {
- enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
-
- if (u->plane_info)
- format = u->plane_info->format;
- else
- format = u->surface->format;
-
- if (dce_use_lut(format))
- update_flags->bits.gamma_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper)
+ if (u->lut3d_func || u->func_shaper) {
update_flags->bits.lut_3d = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ // TODO: Should be fast?
update_flags->bits.sdr_white_level_nits = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->cm2_params) {
- if ((u->cm2_params->component_settings.shaper_3dlut_setting
- != u->surface->mcm_shaper_3dlut_setting)
- || (u->cm2_params->component_settings.lut1d_enable
- != u->surface->mcm_lut1d_enable))
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
- != u->surface->mcm_luts.lut3d_data.lut3d_src)
+ if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
+ || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
+ || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
update_flags->bits.mcm_transfer_function_enable_change = 1;
- }
- if (update_flags->bits.in_transfer_func_change) {
- type = UPDATE_TYPE_MED;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
if (update_flags->bits.lut_3d &&
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
- }
- if (update_flags->bits.mcm_transfer_function_enable_change) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (dc->debug.enable_legacy_fast_update &&
+ if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
update_flags->bits.coeff_reduction_change)) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
return overall_type;
}
@@ -2699,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
}
}
-static enum surface_update_type check_update_surfaces_for_stream(
- struct dc *dc,
+static struct surface_update_descriptor check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-
- if (dc->idle_optimizations_allowed)
- overall_type = UPDATE_TYPE_FULL;
-
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- overall_type = UPDATE_TYPE_FULL;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (stream_update && stream_update->pending_test_pattern) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (stream_update && stream_update->hw_cursor_req) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
/* some stream updates require passive update */
if (stream_update) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
@@ -2732,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
su_flags->bits.abm_level = 1;
- if (stream_update->dpms_off)
+ if (stream_update->dpms_off) {
su_flags->bits.dpms_off = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK);
+ }
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
@@ -2764,24 +3016,30 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->sharpening_required)
su_flags->bits.sharpening_required = 1;
- if (su_flags->raw != 0)
- overall_type = UPDATE_TYPE_FULL;
+ if (stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
+
+ // TODO: Make each elevation explicit, so as not to override fast stream updates in crtc_timing_adjust
+ if (su_flags->raw)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- if (stream_update->output_csc_transform || stream_update->output_color_space)
+ // Non-global cases
+ if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- /* Output transfer function changes do not require bandwidth recalculation,
- * so don't trigger a full update
- */
- if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) {
su_flags->bits.out_tf = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
}
- for (i = 0 ; i < surface_count; i++) {
- enum surface_update_type type =
- det_surface_update(dc, &updates[i]);
+ for (int i = 0 ; i < surface_count; i++) {
+ struct surface_update_descriptor inner_type =
+ det_surface_update(check_config, &updates[i]);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return overall_type;
@@ -2792,46 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type type;
-
if (stream_update)
stream_update->stream->update_flags.raw = 0;
- for (i = 0; i < surface_count; i++)
+ for (size_t i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
- type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL) {
- if (stream_update) {
- uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
- stream_update->stream->update_flags.raw = 0xFFFFFFFF;
- stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
- }
- for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.raw = 0xFFFFFFFF;
- }
-
- if (type == UPDATE_TYPE_FAST) {
- // If there's an available clock comparator, we use that.
- if (dc->clk_mgr->funcs->are_clock_states_equal) {
- if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
- dc->optimized_required = true;
- // Else we fallback to mem compare.
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
- dc->optimized_required = true;
- }
-
- dc->optimized_required |= dc->wm_optimized_required;
- }
-
- return type;
+ return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
static struct dc_stream_status *stream_get_status(
@@ -3057,8 +3287,14 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
+ if (update->crtc_timing_adjust) {
+ if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
+ stream->adjust.timing_adjust_pending)
+ update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
+ update->crtc_timing_adjust->timing_adjust_pending = false;
+ }
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -3078,6 +3314,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->adaptive_sync_infopacket)
stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+ if (update->avi_infopacket)
+ stream->avi_infopacket = *update->avi_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3105,7 +3344,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
+ DC_VALIDATE_MODE_ONLY) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3134,7 +3374,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- scratch->plane_states[i] = *status->plane_states[i];
+ dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3150,12 +3390,13 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- /* refcount will always be valid, restore everything else */
- struct kref refcount = status->plane_states[i]->refcount;
- *status->plane_states[i] = scratch->plane_states[i];
- status->plane_states[i]->refcount = refcount;
+ dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
+
+ // refcount is persistent
+ struct kref temp_refcount = stream->refcount;
*stream = scratch->stream_state;
+ stream->refcount = temp_refcount;
}
/**
@@ -3177,7 +3418,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -3193,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3239,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
+
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
@@ -3271,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
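+	/* For a full update, mark every stream and surface update flag dirty (keeping dsc_changed intact) so all blocks are reprogrammed. */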
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -3330,7 +3591,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3374,7 +3635,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket) {
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -3850,6 +4112,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
+ if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, context);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -3881,6 +4146,7 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3908,13 +4174,10 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
- dc->res_pool->funcs->prepare_mcache_programming(dc, context);
-
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -3934,20 +4197,23 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}
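+	/* If supported, wait for any in-flight pipe update to complete before taking the pipe locks below. */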
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
@@ -3990,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
-
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
return;
}
@@ -4054,12 +4319,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*turn off triple buffer for full update*/
- dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
- }
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4068,8 +4327,37 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+		/* Full front-end update only */
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+		// Pipes remain busy until a specific frame and line number after front-end programming; flag each pipe so it is waited on later if needed
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -4158,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4206,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4228,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->plane_state->skip_manual_trigger)
continue;
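+
+		/* If supported, program any offloaded cursor updates now, before the manual trigger fires. */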
+ if (dc->hwss.program_cursor_offload_now)
+ dc->hwss.program_cursor_offload_now(dc, pipe_ctx);
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
@@ -4409,7 +4699,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
+ DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4519,7 +4810,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_state *old_current_state = dc->current_state;
- struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
int surface_count;
/*
@@ -4732,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
}
}
-static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
@@ -4773,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
return false;
}
-static bool full_update_required(struct dc *dc,
- struct dc_surface_update *srf_updates,
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
-
- int i;
- struct dc_stream_status *stream_status;
const struct dc_state *context = dc->current_state;
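+
+	/* "Weak" checks: conditions that force a full update regardless of which individual surface or stream fields changed. */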
+ if (srf_updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
- for (i = 0; i < surface_count; i++) {
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
@@ -4800,8 +5117,7 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm2_params &&
(srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
- !is_surface_in_context(context, srf_updates[i].surface)))
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
return true;
}
@@ -4818,6 +5134,7 @@ static bool full_update_required(struct dc *dc,
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
@@ -4832,154 +5149,25 @@ static bool full_update_required(struct dc *dc,
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update))
- return true;
-
- if (stream) {
- stream_status = dc_stream_get_status(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
+ stream_update->scaler_sharpener_update ||
+ stream_update->hw_cursor_req))
return true;
return false;
}
-static bool fast_update_only(struct dc *dc,
- struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5037,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5080,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update)
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5206,14 +5394,15 @@ bool dc_update_planes_and_stream(struct dc *dc,
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
-
- if (ret)
+ if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_01))
clear_update_flags(srf_updates, surface_count, stream);
return ret;
@@ -5237,14 +5426,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
}
@@ -5316,13 +5503,18 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
- /*mark d0 last*/
- dc->power_state = power_state;
+ break;
+ case DC_ACPI_CM_POWER_STATE_D3:
+ if (dc->caps.ips_support)
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dc->caps.ips_v2_support) {
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+ }
break;
default:
ASSERT(dc->current_state->stream_count == 0);
- /*mark d3 first*/
- dc->power_state = power_state;
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
dc_state_destruct(dc->current_state);
@@ -5348,18 +5540,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
-{
- info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
-}
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
if (dc->hwss.set_clock)
@@ -5446,6 +5626,11 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
+ int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
+ enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
+ struct pipe_ctx *pipe = NULL;
+ struct dc_state *context = dc->current_state;
+
if (dc->debug.disable_idle_power_optimizations) {
DC_LOG_DEBUG("%s: disabled\n", __func__);
return;
@@ -5470,6 +5655,25 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
dc->idle_optimizations_allowed = allow;
DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
}
+
+ // log idle clocks and sub vp pipe types at idle optimization time
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
+ idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
+ idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
+
+ if (dc->res_pool && context) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ }
+ }
+ if (!dc->caps.is_apu)
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
+ subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
+
}
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
@@ -5776,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
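+
+/* Enable or disable smart power OLED on an eDP link by sending a DMUB command
+ * carrying the panel, OTG and DIG instances along with the requested peak nits
+ * and debug parameters. Returns true if the DMUB command executed successfully.
+ */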
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline)
+{
+ bool status = false;
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ uint8_t otg_inst = 0;
+ unsigned int panel_inst = 0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // get otg_inst
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi edp support
+ break;
+ }
+ }
+
+ if (pipe_ctx)
+ otg_inst = pipe_ctx->stream_res.tg->inst;
+
+	// Before enabling smart power OLED, we need to call set_pipe so that DMUB sets the ABM config
+ if (enable) {
+ if (dc->hwss.set_pipe && pipe_ctx)
+ dc->hwss.set_pipe(pipe_ctx);
+ }
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE;
+ cmd.smart_power_oled_enable.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header);
+ cmd.smart_power_oled_enable.header.ret_status = 1;
+ cmd.smart_power_oled_enable.data.enable = enable;
+ cmd.smart_power_oled_enable.data.panel_inst = panel_inst;
+ cmd.smart_power_oled_enable.data.peak_nits = peak_nits;
+ cmd.smart_power_oled_enable.data.otg_inst = otg_inst;
+ cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine;
+ cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter;
+
+ cmd.smart_power_oled_enable.data.debugcontrol = debug_control;
+ cmd.smart_power_oled_enable.data.triggerline = triggerline;
+ cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL;
+
+ // send cmd
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return status;
+}
+
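+/* Query DMUB for the current max CLL reported by smart power OLED for the panel
+ * behind this eDP link; *pCurrent_MaxCLL is set to 0 on failure.
+ */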
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL)
+{
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ bool status = false;
+ unsigned int panel_inst = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL;
+ cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data);
+ cmd.smart_power_oled_getmaxcll.header.ret_status = 1;
+
+ cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst;
+
+ // send cmd and wait for reply
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+
+ if (status)
+ *pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll;
+ else
+ *pCurrent_MaxCLL = 0;
+
+ return status;
+}
+
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index)
{
@@ -6064,15 +6363,22 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_sw_cursor_req = false;
+ unsigned int max_cursor_size = dc->caps.max_cursor_size;
+ unsigned int stream_cursor_size;
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
- subvp_sw_cursor_req = true;
- break;
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
+ dc->current_state,
+ dc->current_state->streams[i]);
+
+ if (stream_cursor_size < max_cursor_size) {
+ max_cursor_size = stream_cursor_size;
+ }
}
}
- properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+
+ properties->cursor_size_limit = max_cursor_size;
}
/**
@@ -6099,13 +6405,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
-/*
- *****************************************************************************
+/**
* dc_get_power_profile_for_dc_state() - extracts power profile from dc state
*
* Called when DM wants to make power policy decisions based on dc_state
*
- *****************************************************************************
+ * @context: Pointer to the dc_state from which the power profile is extracted.
+ *
+ * Return: The power profile structure containing the power level information.
*/
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
@@ -6121,13 +6428,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
-/*
- **********************************************************************************
+/**
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
- * Called when DM wants to log detile buffer size from dc_state
+ * This function is called to log the detile buffer size from the dc_state.
+ *
+ * @context: a pointer to the dc_state from which the detile buffer size is extracted.
*
- **********************************************************************************
+ * Return: the size of the detile buffer, or 0 if not available.
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
@@ -6138,3 +6446,648 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
+/**
+ * dc_get_host_router_index() - Get the host router index for a DPIA link
+ *
+ * This function returns the host router index of the target link, provided the
+ * target link is a DPIA link.
+ *
+ * @link: Pointer to the target link (input)
+ * @host_router_index: Pointer to store the host router index of the target link (output).
+ *
+ * Return: true if the host router index is found and valid.
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+ struct dc *dc;
+
+ if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ dc = link->ctx->dc;
+
+ if (link->link_index < dc->lowest_dpia_link_index)
+ return false;
+
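+	/* DPIA link indices are expected to be allocated contiguously per host
+	 * router, so the router index is the offset from the lowest DPIA link
+	 * divided by the number of DPIAs per host router.
+	 */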
+ *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+ if (*host_router_index < dc->caps.num_of_host_routers)
+ return true;
+ else
+ return false;
+}
+
+bool dc_is_cursor_limit_pending(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_can_clear_cursor_limit(const struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
+ return true;
+ }
+
+ return false;
+}
+
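+/* Locate the timing generator matching the primary OTG instance, exit IPS so
+ * registers are accessible, then collect underflow debug data via the hwss hook.
+ */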
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
+
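+/* Report power feature status from the current state: UCLK P-state change
+ * support and FW-based memory clock switching (FAMS).
+ */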
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
+ struct power_features *out_data)
+{
+ out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
+ out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+}
+
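+/* Snapshot the software state that drives register programming (HUBP, HUBBUB,
+ * DPP, DCCG, DSC, MPC, OPP and OPTC) from dc->current_state for offline
+ * underflow analysis. Returns false on invalid input.
+ */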
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
+{
+ struct dc_state *context;
+ struct resource_context *res_ctx;
+ int i;
+
+ if (!dc || !dc->current_state || !state) {
+ if (state)
+ state->state_valid = false;
+ return false;
+ }
+
+ /* Initialize the state structure */
+ memset(state, 0, sizeof(struct dc_register_software_state));
+
+ context = dc->current_state;
+ res_ctx = &context->res_ctx;
+
+ /* Count active pipes and streams */
+ state->active_pipe_count = 0;
+ state->active_stream_count = context->stream_count;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream)
+ state->active_pipe_count++;
+ }
+
+ /* Capture HUBP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ state->hubp[i].valid_stream = false;
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->hubp[i].valid_stream = true;
+
+ /* HUBP register programming variables */
+ if (pipe_ctx->stream_res.tg)
+ state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;
+
+ state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;
+
+ state->hubp[i].valid_plane_state = false;
+ if (pipe_ctx->plane_state) {
+ state->hubp[i].valid_plane_state = true;
+ state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
+ state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
+ state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;
+
+ /* Surface size */
+ if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
+ state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
+ state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
+ }
+
+ /* Viewport dimensions from scaler data */
+ if (pipe_ctx->plane_state->src_rect.width > 0) {
+ state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
+ state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
+ state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
+ state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /* DCC settings */
+ state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
+ state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
+ state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;
+
+ /* Surface pitch */
+ state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
+ state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
+ state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
+ state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;
+
+ /* Surface addresses - primary */
+ state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
+ state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
+ state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
+ state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;
+
+ /* TMZ settings */
+ state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+ state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+
+ /* Tiling configuration */
+ state->hubp[i].min_dc_gfx_version9 = false;
+ if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
+ state->hubp[i].min_dc_gfx_version9 = true;
+ state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
+ state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
+ state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
+ state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
+ state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
+ state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
+ state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
+ }
+ }
+
+ /* DML Request Size Configuration */
+ if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
+ state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
+ state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
+ state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
+ }
+
+ /* Chroma request size configuration */
+ if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
+ state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
+ state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
+ state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
+ }
+
+ /* DML expansion modes */
+ state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
+ state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
+ state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
+ state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;
+
+ /* DML DLG parameters - nominal */
+ state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
+ state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
+ state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
+ state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;
+
+ /* DML prefetch settings */
+ state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
+ state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
+ state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;
+
+ /* TTU parameters */
+ state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
+ state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
+ state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
+ state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
+ }
+
+ /* Capture HUBBUB programming state */
+ if (dc->res_pool->hubbub) {
+ /* Individual DET buffer sizes - software state variables that program DET registers */
+ for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
+ uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
+ switch (i) {
+ case 0:
+ state->hubbub.det0_size = det_size;
+ break;
+ case 1:
+ state->hubbub.det1_size = det_size;
+ break;
+ case 2:
+ state->hubbub.det2_size = det_size;
+ break;
+ case 3:
+ state->hubbub.det3_size = det_size;
+ break;
+ }
+ }
+
+ /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
+ // TODO: Handle logic for legacy DCN pre-DCN401
+ state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ }
+
+ /* Capture DPP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
+ /* Access dscl_prog_data directly - this contains the actual software state used for register programming */
+ struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+
+ /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
+ state->dpp[i].recout_start_x = dscl_data->recout.x;
+ state->dpp[i].recout_start_y = dscl_data->recout.y;
+ state->dpp[i].recout_width = dscl_data->recout.width;
+ state->dpp[i].recout_height = dscl_data->recout.height;
+
+ /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
+ state->dpp[i].mpc_width = dscl_data->mpc_size.width;
+ state->dpp[i].mpc_height = dscl_data->mpc_size.height;
+
+ /* DSCL mode - software state that programs SCL_MODE registers */
+ state->dpp[i].dscl_mode = dscl_data->dscl_mode;
+
+ /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
+ state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
+ state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio
+
+ /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
+ state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ }
+ }
+
+ /* Capture essential clock state for underflow analysis */
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
+ /* Core display clocks affecting bandwidth and timing */
+ state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+
+ /* Per-pipe clock configuration - only capture what's essential */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ if (pipe_ctx->stream) {
+ /* Essential clocks that directly affect underflow risk */
+ state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
+ state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ state->dccg.dppclk_enable[i] = 1;
+
+ /* DP stream clock only for DP signals */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ state->dccg.dpstreamclk_enable[i] = 1;
+ } else {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ } else {
+ /* Inactive pipe - no clocks */
+ state->dccg.dppclk_khz[i] = 0;
+ state->dccg.pixclk_khz[i] = 0;
+ state->dccg.dppclk_enable[i] = 0;
+ if (i < 4) {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ }
+ }
+
+ /* DSC clock state - only when actually using DSC */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
+ } else {
+ state->dccg.dscclk_khz[i] = 0;
+ }
+ }
+
+ /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
+ for (i = 0; i < 2; i++) {
+ state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
+ }
+
+ }
+
+ /* Capture essential DSC configuration for underflow analysis */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ /* DSC is enabled - capture essential configuration */
+ state->dsc[i].dsc_clock_enable = 1;
+
+ /* DSC configuration affecting bandwidth and timing */
+ struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
+ state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
+ state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
+ state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;
+
+ /* OPP pipe source for DSC forwarding */
+ if (pipe_ctx->stream_res.opp) {
+ state->dsc[i].dscrm_dsc_forward_enable = 1;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->dsc[i].dscrm_dsc_forward_enable = 0;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
+ }
+ } else {
+ /* DSC not enabled - clear all fields */
+ memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
+ }
+ }
+
+ /* Capture MPC programming state - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream) {
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ /* MPCC blending tree and mode control - capture actual blend configuration */
+ state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
+ state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
+ state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
+ state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
+ state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */
+
+ /* MPCC blending tree connections - capture tree topology */
+ if (pipe_ctx->bottom_pipe) {
+ state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
+ } else {
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ }
+ state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */
+
+ /* MPCC output gamma control - capture gamma programming */
+ if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
+ state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
+ state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
+ state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
+ } else {
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ }
+
+ /* MPCC pipe assignment and operational status */
+ if (pipe_ctx->stream_res.opp) {
+ state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ }
+
+ /* MPCC status indicators - active pipe state */
+ state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
+ state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */
+
+ } else {
+ /* Pipe not active - set disabled/idle state for all fields */
+ state->mpc.mpcc_mode[i] = 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
+ state->mpc.mpcc_global_alpha[i] = 0;
+ state->mpc.mpcc_global_gain[i] = 0;
+ state->mpc.mpcc_bg_bpc[i] = 0;
+ state->mpc.mpcc_bot_gain_mode[i] = 0;
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ state->mpc.mpcc_idle[i] = 1; /* Idle */
+ state->mpc.mpcc_busy[i] = 0; /* Not busy */
+ }
+ }
+
+ /* Capture OPP programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.opp) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ /* OPP Pipe Control */
+ state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */
+
+ /* Display Pattern Generator (DPG) Control - 19 fields */
+ if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ state->opp[i].dpg_enable = 1;
+ } else {
+ /* Video mode - DPG disabled */
+ state->opp[i].dpg_enable = 0;
+ }
+
+ /* Format Control (FMT) - 18 fields */
+ state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;
+
+ /* Chroma subsampling mode based on pixel encoding */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
+ } else {
+ state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
+ }
+
+ state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+
+ /* Dithering control based on bit depth */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
+ state->opp[i].fmt_spatial_dither_enable = 1;
+ state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
+ state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
+ state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
+ } else {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
+ state->opp[i].fmt_spatial_dither_enable = 0;
+ state->opp[i].fmt_spatial_dither_mode = 0;
+ state->opp[i].fmt_spatial_dither_depth = 0;
+ state->opp[i].fmt_temporal_dither_enable = 0;
+ }
+
+ /* Truncation control for bit depth reduction */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_truncate_enable = 1;
+ state->opp[i].fmt_truncate_depth = timing->display_color_depth;
+ state->opp[i].fmt_truncate_mode = 0; /* Round mode */
+ } else {
+ state->opp[i].fmt_truncate_enable = 0;
+ state->opp[i].fmt_truncate_depth = 0;
+ state->opp[i].fmt_truncate_mode = 0;
+ }
+
+ /* Data clamping control */
+ state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
+ state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;
+
+ /* Dynamic expansion for limited range content */
+ if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
+ state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
+ state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
+ } else {
+ state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
+ state->opp[i].fmt_dynamic_exp_mode = 0;
+ }
+
+ /* Legacy field for compatibility */
+ state->opp[i].fmt_bit_depth_control = timing->display_color_depth;
+
+ /* Output Buffer (OPPBUF) Control - 6 fields */
+ state->opp[i].oppbuf_active_width = timing->h_addressable;
+ state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */
+
+ /* Multi-Stream Output (MSO) / ODM segmentation */
+ if (pipe_ctx->next_odm_pipe) {
+ state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
+ state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
+ } else {
+ state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
+ state->opp[i].oppbuf_overlap_pixel_num = 0;
+ }
+
+ /* 3D/Stereo control */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
+ state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
+ state->opp[i].oppbuf_3d_vact_space2_size = 30;
+ } else {
+ state->opp[i].oppbuf_3d_vact_space1_size = 0;
+ state->opp[i].oppbuf_3d_vact_space2_size = 0;
+ }
+
+ /* DSC Forward Config - 3 fields */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->opp[i].dscrm_dsc_forward_enable = 1;
+ state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
+ } else {
+ state->opp[i].dscrm_dsc_forward_enable = 0;
+ state->opp[i].dscrm_dsc_opp_pipe_source = 0;
+ state->opp[i].dscrm_dsc_forward_enable_status = 0;
+ }
+ } else {
+ /* No OPP resource - set all fields to disabled state */
+ memset(&state->opp[i], 0, sizeof(state->opp[i]));
+ }
+ }
+
+ /* Capture OPTC programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;
+
+ /* OTG_CONTROL register - 5 fields */
+ state->optc[i].otg_master_enable = 1; /* Active stream */
+ state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
+ state->optc[i].otg_start_point_cntl = 0; /* Normal start */
+ state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
+ state->optc[i].otg_out_mux = 0; /* Direct output */
+
+ /* OTG Horizontal Timing - 7 fields */
+ state->optc[i].otg_h_total = timing->h_total;
+ state->optc[i].otg_h_blank_start = timing->h_addressable;
+ state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
+ state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
+ state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
+ state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ state->optc[i].otg_v_total = timing->v_total;
+ state->optc[i].otg_v_blank_start = timing->v_addressable;
+ state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
+ state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
+ state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
+ state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */
+
+ /* Initialize remaining core fields with appropriate defaults */
+ // TODO: Update logic for accurate vtotal min/max
+ state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
+ state->optc[i].otg_v_total_min = timing->v_total - 50;
+ state->optc[i].otg_v_total_mid = timing->v_total;
+
+ /* ODM configuration */
+			// TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in a single pipe
+ if (pipe_ctx->next_odm_pipe) {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
+ state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */
+ } else {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = 0;
+ state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
+ }
+
+ /* DSC configuration */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
+ state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */
+ state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
+ } else {
+ state->optc[i].optc_dsc_mode = 0;
+ state->optc[i].optc_dsc_bytes_per_pixel = 0;
+ state->optc[i].optc_dsc_slice_width = 0;
+ }
+
+ /* Essential control fields */
+ state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+ state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
+ state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
+ state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */
+
+ /* Initialize other key fields to defaults */
+ state->optc[i].optc_input_pix_clk_en = 1;
+ state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
+ state->optc[i].otg_vready_offset = 1;
+ state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
+ state->optc[i].otg_vupdate_offset = 0;
+ state->optc[i].otg_vupdate_width = 5;
+ } else {
+ /* No timing generator resource - initialize all fields to 0 */
+ memset(&state->optc[i], 0, sizeof(state->optc[i]));
+ }
+ }
+
+ state->state_valid = true;
+ return true;
+}
+
+void dc_log_preos_dmcub_info(const struct dc *dc)
+{
+ dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index af1ea5792560..bbce751b485f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -51,126 +51,6 @@
DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count)
-{
- int i;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- for (i = 0; i < surface_count; i++) {
- const struct dc_plane_state *plane_state = plane_states[i];
-
- SURFACE_TRACE("Planes %d:\n", i);
-
- SURFACE_TRACE(
- "plane_state->visible = %d;\n"
- "plane_state->flip_immediate = %d;\n"
- "plane_state->address.type = %d;\n"
- "plane_state->address.grph.addr.quad_part = 0x%llX;\n"
- "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
- "plane_state->scaling_quality.h_taps = %d;\n"
- "plane_state->scaling_quality.v_taps = %d;\n"
- "plane_state->scaling_quality.h_taps_c = %d;\n"
- "plane_state->scaling_quality.v_taps_c = %d;\n",
- plane_state->visible,
- plane_state->flip_immediate,
- plane_state->address.type,
- plane_state->address.grph.addr.quad_part,
- plane_state->address.grph.meta_addr.quad_part,
- plane_state->scaling_quality.h_taps,
- plane_state->scaling_quality.v_taps,
- plane_state->scaling_quality.h_taps_c,
- plane_state->scaling_quality.v_taps_c);
-
- SURFACE_TRACE(
- "plane_state->src_rect.x = %d;\n"
- "plane_state->src_rect.y = %d;\n"
- "plane_state->src_rect.width = %d;\n"
- "plane_state->src_rect.height = %d;\n"
- "plane_state->dst_rect.x = %d;\n"
- "plane_state->dst_rect.y = %d;\n"
- "plane_state->dst_rect.width = %d;\n"
- "plane_state->dst_rect.height = %d;\n"
- "plane_state->clip_rect.x = %d;\n"
- "plane_state->clip_rect.y = %d;\n"
- "plane_state->clip_rect.width = %d;\n"
- "plane_state->clip_rect.height = %d;\n",
- plane_state->src_rect.x,
- plane_state->src_rect.y,
- plane_state->src_rect.width,
- plane_state->src_rect.height,
- plane_state->dst_rect.x,
- plane_state->dst_rect.y,
- plane_state->dst_rect.width,
- plane_state->dst_rect.height,
- plane_state->clip_rect.x,
- plane_state->clip_rect.y,
- plane_state->clip_rect.width,
- plane_state->clip_rect.height);
-
- SURFACE_TRACE(
- "plane_state->plane_size.surface_size.x = %d;\n"
- "plane_state->plane_size.surface_size.y = %d;\n"
- "plane_state->plane_size.surface_size.width = %d;\n"
- "plane_state->plane_size.surface_size.height = %d;\n"
- "plane_state->plane_size.surface_pitch = %d;\n",
- plane_state->plane_size.surface_size.x,
- plane_state->plane_size.surface_size.y,
- plane_state->plane_size.surface_size.width,
- plane_state->plane_size.surface_size.height,
- plane_state->plane_size.surface_pitch);
-
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.num_banks = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
- plane_state->tiling_info.gfx8.num_banks,
- plane_state->tiling_info.gfx8.bank_width,
- plane_state->tiling_info.gfx8.bank_width_c,
- plane_state->tiling_info.gfx8.bank_height,
- plane_state->tiling_info.gfx8.bank_height_c,
- plane_state->tiling_info.gfx8.tile_aspect,
- plane_state->tiling_info.gfx8.tile_aspect_c,
- plane_state->tiling_info.gfx8.tile_split,
- plane_state->tiling_info.gfx8.tile_split_c,
- plane_state->tiling_info.gfx8.tile_mode,
- plane_state->tiling_info.gfx8.tile_mode_c);
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
- "plane_state->tiling_info.gfx8.array_mode = %d;\n"
- "plane_state->color_space = %d;\n"
- "plane_state->dcc.enable = %d;\n"
- "plane_state->format = %d;\n"
- "plane_state->rotation = %d;\n"
- "plane_state->stereo_format = %d;\n",
- plane_state->tiling_info.gfx8.pipe_config,
- plane_state->tiling_info.gfx8.array_mode,
- plane_state->color_space,
- plane_state->dcc.enable,
- plane_state->format,
- plane_state->rotation,
- plane_state->stereo_format);
-
- SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
- plane_state->tiling_info.gfx9.swizzle);
-
- SURFACE_TRACE("\n");
- }
- SURFACE_TRACE("\n");
-}
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
@@ -386,6 +266,10 @@ char *dc_status_to_str(enum dc_status status)
return "Fail dp payload allocation";
case DC_FAIL_DP_LINK_BANDWIDTH:
return "Insufficient DP link bandwidth";
+ case DC_FAIL_HW_CURSOR_SUPPORT:
+ return "HW Cursor not supported";
+ case DC_FAIL_DP_TUNNEL_BW_VALIDATE:
+ return "Fail DP Tunnel BW validation";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 252af83e34a5..e2763b60482a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -32,8 +32,16 @@
#include "resource.h"
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
+#include "opp.h"
+#include "dsc.h"
+#include "dchubbub.h"
+#include "dccg.h"
+#include "abm.h"
+#include "dcn10/dcn10_hubbub.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+#define MAX_NUM_MCACHE 8
/* used as index in array of black_color_format */
enum black_color_format {
@@ -176,7 +184,7 @@ static bool is_ycbcr2020_type(
{
bool ret = false;
- if (color_space == COLOR_SPACE_2020_YCBCR)
+ if (color_space == COLOR_SPACE_2020_YCBCR_LIMITED || color_space == COLOR_SPACE_2020_YCBCR_FULL)
ret = true;
return ret;
}
@@ -247,7 +255,8 @@ void color_space_to_black_color(
case COLOR_SPACE_YCBCR709_BLACK:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
break;
@@ -256,7 +265,7 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;
- /**
+ /*
* Remove default and add case for all color space
* so when we forget to add new color space
* compiler will give a warning
@@ -425,6 +434,70 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ const struct tg_color sm_ver_colors[5] = {
+ {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+ {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta */
+ };
+
+ if (dc->caps.is_apu) {
+ /* APU driving the eDP */
+ *color = sm_ver_colors[dc->config.smart_mux_version];
+ } else {
+ /* dGPU driving the eDP - red */
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
+/* Visual Confirm color definition for VABC */
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ struct dc_link *edp_link = NULL;
+
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link) {
+ if (pipe_ctx->stream->link->connector_signal == SIGNAL_TYPE_EDP)
+ edp_link = pipe_ctx->stream->link;
+ }
+
+ if (edp_link) {
+ switch (edp_link->backlight_control_type) {
+ case BACKLIGHT_CONTROL_PWM:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_AMD_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_VESA_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ }
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -514,6 +587,53 @@ void get_cursor_visual_confirm_color(
}
}
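+/* Visual Confirm color definition for DCC */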
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+ uint32_t first_id, second_id;
+
+ if (!pipe_ctx->plane_state->dcc.enable) {
+ color->color_r_cr = 0; /* black - DCC disabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ if (dc->ctx->dce_version < DCN_VERSION_4_01) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE; /* red - DCC enabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ first_id = pipe_ctx->mcache_regs.main.p0.mcache_id_first;
+ second_id = pipe_ctx->mcache_regs.main.p0.mcache_id_second;
+
+ if (first_id != MCACHE_ID_UNASSIGNED && second_id != MCACHE_ID_UNASSIGNED && first_id != second_id) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE/2; /* grey - 2 mcache */
+ color->color_g_y = MAX_TG_COLOR_VALUE/2;
+ color->color_b_cb = MAX_TG_COLOR_VALUE/2;
+ } else if (first_id != MCACHE_ID_UNASSIGNED || second_id != MCACHE_ID_UNASSIGNED) {
+ const struct tg_color id_colors[MAX_NUM_MCACHE] = {
+ {0, MAX_TG_COLOR_VALUE, 0}, /* green */
+ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* white */
+ {MAX_TG_COLOR_VALUE/2, 0, 0}, /* dark red */
+ {0, MAX_TG_COLOR_VALUE/2, 0}, /* dark green */
+ };
+
+ uint32_t assigned_id = (first_id != MCACHE_ID_UNASSIGNED) ? first_id : second_id;
+ *color = id_colors[assigned_id];
+ }
+}
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
@@ -525,6 +645,7 @@ void set_p_state_switch_method(
if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
return;
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
dm_dram_clock_change_unsupported) {
/* MCLK switching is supported */
@@ -571,6 +692,21 @@ void set_p_state_switch_method(
}
}
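+/*
+ * Program DRR on the timing generator (when supported) and clear any pending
+ * timing adjust request on the stream in one place.
+ */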
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params)
+{
+ /* params can be NULL. */
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, params);
+
+ if (stream)
+ stream->adjust.timing_adjust_pending = false;
+}
+
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
@@ -594,7 +730,7 @@ void get_fams2_visual_confirm_color(
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -626,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required =
+ dc_state_is_fams2_in_use(dc, context) ||
+ dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
if (dc->hwss.pipe_control_lock) {
@@ -655,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_mpc_pipe) {
if (current_mpc_pipe->plane_state) {
if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp;
block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
(*num_steps)++;
@@ -714,7 +852,14 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
(*num_steps)++;
}
-
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE &&
+ dc->hwss.update_visual_confirm_color) {
+ block_sequence[*num_steps].params.update_visual_confirm_params.dc = dc;
+ block_sequence[*num_steps].params.update_visual_confirm_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.update_visual_confirm_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
+ block_sequence[*num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*num_steps)++;
+ }
if (current_mpc_pipe->stream->update_flags.bits.out_csc) {
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc;
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
@@ -758,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -775,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
+ if (dc->hwss.program_cursor_offload_now) {
+ block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc;
+ block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW;
+ (*num_steps)++;
+ }
+
block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
(*num_steps)++;
@@ -786,7 +938,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps)
{
unsigned int i;
@@ -806,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc,
params->pipe_control_lock_params.lock);
break;
case HUBP_SET_FLIP_CONTROL_GSL:
- dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
- params->set_flip_control_gsl_params.flip_immediate);
+ params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ params->set_flip_control_gsl_params.hubp,
+ params->set_flip_control_gsl_params.flip_immediate);
break;
case HUBP_PROGRAM_TRIPLEBUFFER:
dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
@@ -865,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc,
params->wait_for_dcc_meta_propagation_params.dc,
params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
break;
- case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
- dc->hwss.fams2_global_control_lock_fast(params);
+ case DMUB_HW_CONTROL_LOCK_FAST:
+ dc->hwss.dmub_hw_control_lock_fast(params);
+ break;
+ case HUBP_PROGRAM_SURFACE_CONFIG:
+ hwss_program_surface_config(params);
+ break;
+ case HUBP_PROGRAM_MCACHE_ID:
+ hwss_program_mcache_id_and_split_coordinate(params);
+ break;
+ case PROGRAM_CURSOR_UPDATE_NOW:
+ dc->hwss.program_cursor_offload_now(
+ params->program_cursor_update_now_params.dc,
+ params->program_cursor_update_now_params.pipe_ctx);
+ break;
+ case HUBP_WAIT_PIPE_READ_START:
+ params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start(
+ params->hubp_wait_pipe_read_start_params.hubp);
+ break;
+ case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM:
+ dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx);
+ break;
+ case HWS_UPDATE_PHANTOM_VP_POSITION:
+ dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc,
+ params->update_phantom_vp_position_params.context,
+ params->update_phantom_vp_position_params.pipe_ctx);
+ break;
+ case OPTC_SET_ODM_COMBINE:
+ hwss_set_odm_combine(params);
+ break;
+ case OPTC_SET_ODM_BYPASS:
+ hwss_set_odm_bypass(params);
+ break;
+ case OPP_PIPE_CLOCK_CONTROL:
+ hwss_opp_pipe_clock_control(params);
+ break;
+ case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL:
+ hwss_opp_program_left_edge_extra_pixel(params);
+ break;
+ case DCCG_SET_DTO_DSCCLK:
+ hwss_dccg_set_dto_dscclk(params);
+ break;
+ case DSC_SET_CONFIG:
+ hwss_dsc_set_config(params);
+ break;
+ case DSC_ENABLE:
+ hwss_dsc_enable(params);
+ break;
+ case TG_SET_DSC_CONFIG:
+ hwss_tg_set_dsc_config(params);
+ break;
+ case DSC_DISCONNECT:
+ hwss_dsc_disconnect(params);
+ break;
+ case DSC_READ_STATE:
+ hwss_dsc_read_state(params);
+ break;
+ case DSC_CALCULATE_AND_SET_CONFIG:
+ hwss_dsc_calculate_and_set_config(params);
+ break;
+ case DSC_ENABLE_WITH_OPP:
+ hwss_dsc_enable_with_opp(params);
+ break;
+ case TG_PROGRAM_GLOBAL_SYNC:
+ hwss_tg_program_global_sync(params);
+ break;
+ case TG_WAIT_FOR_STATE:
+ hwss_tg_wait_for_state(params);
+ break;
+ case TG_SET_VTG_PARAMS:
+ hwss_tg_set_vtg_params(params);
+ break;
+ case TG_SETUP_VERTICAL_INTERRUPT2:
+ hwss_tg_setup_vertical_interrupt2(params);
+ break;
+ case DPP_SET_HDR_MULTIPLIER:
+ hwss_dpp_set_hdr_multiplier(params);
+ break;
+ case HUBP_PROGRAM_DET_SIZE:
+ hwss_program_det_size(params);
+ break;
+ case HUBP_PROGRAM_DET_SEGMENTS:
+ hwss_program_det_segments(params);
+ break;
+ case OPP_SET_DYN_EXPANSION:
+ hwss_opp_set_dyn_expansion(params);
+ break;
+ case OPP_PROGRAM_FMT:
+ hwss_opp_program_fmt(params);
+ break;
+ case OPP_PROGRAM_BIT_DEPTH_REDUCTION:
+ hwss_opp_program_bit_depth_reduction(params);
+ break;
+ case OPP_SET_DISP_PATTERN_GENERATOR:
+ hwss_opp_set_disp_pattern_generator(params);
+ break;
+ case ABM_SET_PIPE:
+ hwss_set_abm_pipe(params);
+ break;
+ case ABM_SET_LEVEL:
+ hwss_set_abm_level(params);
+ break;
+ case ABM_SET_IMMEDIATE_DISABLE:
+ hwss_set_abm_immediate_disable(params);
+ break;
+ case MPC_REMOVE_MPCC:
+ hwss_mpc_remove_mpcc(params);
+ break;
+ case OPP_SET_MPCC_DISCONNECT_PENDING:
+ hwss_opp_set_mpcc_disconnect_pending(params);
+ break;
+ case DC_SET_OPTIMIZED_REQUIRED:
+ hwss_dc_set_optimized_required(params);
+ break;
+ case HUBP_DISCONNECT:
+ hwss_hubp_disconnect(params);
+ break;
+ case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL:
+ hwss_hubbub_force_pstate_change_control(params);
+ break;
+ case TG_ENABLE_CRTC:
+ hwss_tg_enable_crtc(params);
+ break;
+ case TG_SET_GSL:
+ hwss_tg_set_gsl(params);
+ break;
+ case TG_SET_GSL_SOURCE_SELECT:
+ hwss_tg_set_gsl_source_select(params);
+ break;
+ case HUBP_WAIT_FLIP_PENDING:
+ hwss_hubp_wait_flip_pending(params);
+ break;
+ case TG_WAIT_DOUBLE_BUFFER_PENDING:
+ hwss_tg_wait_double_buffer_pending(params);
+ break;
+ case UPDATE_FORCE_PSTATE:
+ hwss_update_force_pstate(params);
+ break;
+ case HUBBUB_APPLY_DEDCN21_147_WA:
+ hwss_hubbub_apply_dedcn21_147_wa(params);
+ break;
+ case HUBBUB_ALLOW_SELF_REFRESH_CONTROL:
+ hwss_hubbub_allow_self_refresh_control(params);
+ break;
+ case TG_GET_FRAME_COUNT:
+ hwss_tg_get_frame_count(params);
+ break;
+ case MPC_SET_DWB_MUX:
+ hwss_mpc_set_dwb_mux(params);
+ break;
+ case MPC_DISABLE_DWB_MUX:
+ hwss_mpc_disable_dwb_mux(params);
+ break;
+ case MCIF_WB_CONFIG_BUF:
+ hwss_mcif_wb_config_buf(params);
+ break;
+ case MCIF_WB_CONFIG_ARB:
+ hwss_mcif_wb_config_arb(params);
+ break;
+ case MCIF_WB_ENABLE:
+ hwss_mcif_wb_enable(params);
+ break;
+ case MCIF_WB_DISABLE:
+ hwss_mcif_wb_disable(params);
+ break;
+ case DWBC_ENABLE:
+ hwss_dwbc_enable(params);
+ break;
+ case DWBC_DISABLE:
+ hwss_dwbc_disable(params);
+ break;
+ case DWBC_UPDATE:
+ hwss_dwbc_update(params);
+ break;
+ case HUBP_UPDATE_MALL_SEL:
+ hwss_hubp_update_mall_sel(params);
+ break;
+ case HUBP_PREPARE_SUBVP_BUFFERING:
+ hwss_hubp_prepare_subvp_buffering(params);
+ break;
+ case HUBP_SET_BLANK_EN:
+ hwss_hubp_set_blank_en(params);
+ break;
+ case HUBP_DISABLE_CONTROL:
+ hwss_hubp_disable_control(params);
+ break;
+ case HUBBUB_SOFT_RESET:
+ hwss_hubbub_soft_reset(params);
+ break;
+ case HUBP_CLK_CNTL:
+ hwss_hubp_clk_cntl(params);
+ break;
+ case HUBP_INIT:
+ hwss_hubp_init(params);
+ break;
+ case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS:
+ hwss_hubp_set_vm_system_aperture_settings(params);
+ break;
+ case HUBP_SET_FLIP_INT:
+ hwss_hubp_set_flip_int(params);
+ break;
+ case DPP_DPPCLK_CONTROL:
+ hwss_dpp_dppclk_control(params);
+ break;
+ case DISABLE_PHANTOM_CRTC:
+ hwss_disable_phantom_crtc(params);
+ break;
+ case DSC_PG_STATUS:
+ hwss_dsc_pg_status(params);
+ break;
+ case DSC_WAIT_DISCONNECT_PENDING_CLEAR:
+ hwss_dsc_wait_disconnect_pending_clear(params);
+ break;
+ case DSC_DISABLE:
+ hwss_dsc_disable(params);
+ break;
+ case DCCG_SET_REF_DSCCLK:
+ hwss_dccg_set_ref_dscclk(params);
+ break;
+ case DPP_PG_CONTROL:
+ hwss_dpp_pg_control(params);
+ break;
+ case HUBP_PG_CONTROL:
+ hwss_hubp_pg_control(params);
+ break;
+ case HUBP_RESET:
+ hwss_hubp_reset(params);
+ break;
+ case DPP_RESET:
+ hwss_dpp_reset(params);
+ break;
+ case DPP_ROOT_CLOCK_CONTROL:
+ hwss_dpp_root_clock_control(params);
+ break;
+ case DC_IP_REQUEST_CNTL:
+ hwss_dc_ip_request_cntl(params);
+ break;
+ case DCCG_UPDATE_DPP_DTO:
+ hwss_dccg_update_dpp_dto(params);
+ break;
+ case HUBP_VTG_SEL:
+ hwss_hubp_vtg_sel(params);
+ break;
+ case HUBP_SETUP2:
+ hwss_hubp_setup2(params);
+ break;
+ case HUBP_SETUP:
+ hwss_hubp_setup(params);
+ break;
+ case HUBP_SET_UNBOUNDED_REQUESTING:
+ hwss_hubp_set_unbounded_requesting(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT2:
+ hwss_hubp_setup_interdependent2(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT:
+ hwss_hubp_setup_interdependent(params);
+ break;
+ case DPP_SET_CURSOR_MATRIX:
+ hwss_dpp_set_cursor_matrix(params);
+ break;
+ case MPC_UPDATE_BLENDING:
+ hwss_mpc_update_blending(params);
+ break;
+ case MPC_ASSERT_IDLE_MPCC:
+ hwss_mpc_assert_idle_mpcc(params);
+ break;
+ case MPC_INSERT_PLANE:
+ hwss_mpc_insert_plane(params);
+ break;
+ case DPP_SET_SCALER:
+ hwss_dpp_set_scaler(params);
+ break;
+ case HUBP_MEM_PROGRAM_VIEWPORT:
+ hwss_hubp_mem_program_viewport(params);
+ break;
+ case ABORT_CURSOR_OFFLOAD_UPDATE:
+ hwss_abort_cursor_offload_update(params);
+ break;
+ case SET_CURSOR_ATTRIBUTE:
+ hwss_set_cursor_attribute(params);
+ break;
+ case SET_CURSOR_POSITION:
+ hwss_set_cursor_position(params);
+ break;
+ case SET_CURSOR_SDR_WHITE_LEVEL:
+ hwss_set_cursor_sdr_white_level(params);
+ break;
+ case PROGRAM_OUTPUT_CSC:
+ hwss_program_output_csc(params);
+ break;
+ case HUBP_SET_BLANK:
+ hwss_hubp_set_blank(params);
+ break;
+ case PHANTOM_HUBP_POST_ENABLE:
+ hwss_phantom_hubp_post_enable(params);
break;
default:
ASSERT(false);
@@ -875,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc,
}
}
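+/*
+ * The hwss_add_*() helpers below all follow the same pattern: bounds-check
+ * against MAX_HWSS_BLOCK_SEQUENCE_SIZE, fill in the step parameters, set the
+ * function id and advance *num_steps. An illustrative build/execute sketch
+ * (not taken from an existing caller; the .steps/.num_steps member names are
+ * inferred from how the helpers dereference seq_state):
+ *
+ *	struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
+ *	struct block_sequence_state seq_state;
+ *	unsigned int num_steps = 0;
+ *
+ *	seq_state.steps = block_sequence;
+ *	seq_state.num_steps = &num_steps;
+ *	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, true);
+ *	hwss_add_hubp_update_plane_addr(&seq_state, dc, pipe_ctx);
+ *	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, false);
+ *	hwss_execute_sequence(dc, block_sequence, num_steps);
+ */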
+/*
+ * Helper function to add OPTC pipe control lock to block sequence
+ */
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool lock)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set flip control GSL to block sequence
+ */
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool flip_immediate)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program triplebuffer to block sequence
+ */
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enableTripleBuffer)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP update plane address to block sequence
+ */
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set input transfer function to block sequence
+ */
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program gamut remap to block sequence
+ */
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program bias and scale to block sequence
+ */
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC program manual trigger to block sequence
+ */
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set output transfer function to block sequence
+ */
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update visual confirm to block sequence
+ */
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC power on MPC mem PWR to block sequence
+ */
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on;
+ seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set output CSC to block sequence
+ */
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set OCSC default to block sequence
+ */
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space colorspace,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB send DMCUB command to block sequence
+ */
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB SubVP save surface address to block sequence
+ */
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct dc_plane_address *addr,
+ uint8_t subvp_index)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait for DCC meta propagation to block sequence
+ */
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait pipe read start to block sequence
+ */
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS apply update flags for phantom to block sequence
+ */
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS update phantom VP position to block sequence
+ */
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM combine to block sequence
+ */
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg;
+ memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES);
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM bypass to block sequence
+ */
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS;
+ (*seq_state->num_steps)++;
+ }
+}
+
void hwss_send_dmcub_cmd(union block_sequence_params *params)
{
struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
@@ -884,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
+/*
+ * Helper function to add TG program global sync to block sequence
+ */
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines;
+ seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait for state to block sequence
+ */
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ enum crtc_state state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG set VTG params to block sequence
+ */
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dc_crtc_timing *dc_crtc_timing,
+ bool program_fp2)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2;
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG setup vertical interrupt2 to block sequence
+ */
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line;
+ seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set HDR multiplier to block sequence
+ */
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET size to block sequence
+ */
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_buffer_size_kb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE;
+ (*seq_state->num_steps)++;
+ }
+}
+
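+/*
+ * Helper function to add HUBP program mcache ID and split coordinate to block sequence
+ */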
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID;
+ (*seq_state->num_steps)++;
+ }
+}
+
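+/*
+ * Helper function to add HUBBUB force pstate change control to block sequence
+ */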
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool enable,
+ bool wait)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait;
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET segments to block sequence
+ */
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_size)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP set dynamic expansion to block sequence
+ */
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_space,
+ enum dc_color_depth color_depth,
+ enum signal_type signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal;
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP program FMT to block sequence
+ */
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping;
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT;
+ (*seq_state->num_steps)++;
+ }
+}
+
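+/*
+ * Helper function to add OPP program left edge extra pixel to block sequence
+ */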
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set pipe to block sequence
+ */
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set level to block sequence
+ */
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm,
+ uint32_t abm_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG enable CRTC to block sequence
+ */
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait flip pending to block sequence
+ */
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait double buffer pending to block sequence
+ */
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
void hwss_program_manual_trigger(union block_sequence_params *params)
{
struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
@@ -910,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params)
plane_state->color_space,
NULL);
}
-
- if (dpp && dpp->funcs->set_cursor_matrix) {
- dpp->funcs->set_cursor_matrix(dpp,
- plane_state->color_space,
- plane_state->cursor_csc_color_matrix);
- }
}
void hwss_program_bias_and_scale(union block_sequence_params *params)
@@ -926,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
//TODO :for CNVC set scale and bias registers if necessary
- if (dpp->funcs->dpp_program_bias_and_scale) {
+ if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
- }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -978,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
+void hwss_program_surface_config(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_surface_config_params.hubp;
+ enum surface_pixel_format format = params->program_surface_config_params.format;
+ struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info;
+ struct plane_size size = params->program_surface_config_params.plane_size;
+ enum dc_rotation_angle rotation = params->program_surface_config_params.rotation;
+ struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc;
+ bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror;
+ int compat_level = params->program_surface_config_params.compat_level;
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ format,
+ tiling_info,
+ &size,
+ rotation,
+ dcc,
+ horizontal_mirror,
+ compat_level);
+
+ hubp->power_gated = false;
+}
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs;
+
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs);
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -1041,6 +2115,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
@@ -1050,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1126,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}
+
+void hwss_set_odm_combine(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_combine_params.tg;
+ int *opp_inst = params->set_odm_combine_params.opp_inst;
+ int opp_head_count = params->set_odm_combine_params.opp_head_count;
+ int odm_slice_width = params->set_odm_combine_params.odm_slice_width;
+ int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width;
+
+ if (tg && tg->funcs->set_odm_combine)
+ tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count,
+ odm_slice_width, last_odm_slice_width);
+}
+
+void hwss_set_odm_bypass(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_bypass_params.tg;
+ const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing;
+
+ if (tg && tg->funcs->set_odm_bypass)
+ tg->funcs->set_odm_bypass(tg, timing);
+}
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp;
+ bool enable = params->opp_pipe_clock_control_params.enable;
+
+ if (opp && opp->funcs->opp_pipe_clock_control)
+ opp->funcs->opp_pipe_clock_control(opp, enable);
+}
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp;
+ enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding;
+ bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master;
+
+ if (opp && opp->funcs->opp_program_left_edge_extra_pixel)
+ opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master);
+}
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg;
+ int inst = params->dccg_set_dto_dscclk_params.inst;
+ int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h;
+
+ if (dccg && dccg->funcs->set_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h);
+}
+
+void hwss_dsc_set_config(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc;
+ struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg;
+
+ if (dsc && dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg);
+}
+
+void hwss_dsc_enable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_enable_params.dsc;
+ int opp_inst = params->dsc_enable_params.opp_inst;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, opp_inst);
+}
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_dsc_config_params.tg;
+ enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED;
+ uint32_t bytes_per_pixel = 0;
+ uint32_t slice_width = 0;
+
+ if (params->tg_set_dsc_config_params.enable) {
+ struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg;
+
+ if (dsc_optc_cfg) {
+ bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel;
+ slice_width = dsc_optc_cfg->slice_width;
+ optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ?
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+ }
+ }
+
+ if (tg && tg->funcs->set_dsc_config)
+ tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width);
+}
+
+void hwss_dsc_disconnect(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc;
+
+ if (dsc && dsc->funcs->dsc_disconnect)
+ dsc->funcs->dsc_disconnect(dsc);
+}
+
+void hwss_dsc_read_state(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc;
+ struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state;
+
+ if (dsc && dsc->funcs->dsc_read_state)
+ dsc->funcs->dsc_read_state(dsc, dsc_state);
+}
+
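+/*
+ * Worked example for hwss_dsc_calculate_and_set_config() below (illustrative
+ * numbers): a 3840x2160 stream split across two ODM slices (opp_cnt == 2),
+ * with no borders and no DSC hactive padding, gives each DSC instance
+ * pic_width = 3840 / 2 = 1920, pic_height = 2160, and a per-instance
+ * num_slices_h of half the stream's configured value.
+ */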
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx;
+ struct pipe_ctx *top_pipe = pipe_ctx;
+ bool enable = params->dsc_calculate_and_set_config_params.enable;
+ int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt;
+
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+
+ if (!dsc || !enable)
+ return;
+
+ /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */
+ while (top_pipe->prev_odm_pipe)
+ top_pipe = top_pipe->prev_odm_pipe;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = top_pipe->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+ /* Set DSC configuration */
+ if (dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+ &params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+ int vready_offset = params->tg_program_global_sync_params.vready_offset;
+ unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+ unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+ if (tg->funcs->program_global_sync) {
+ tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+ vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+ }
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+ enum crtc_state state = params->tg_wait_for_state_params.state;
+
+ if (tg->funcs->wait_for_state)
+ tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+ struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing;
+ bool program_fp2 = params->tg_set_vtg_params_params.program_fp2;
+
+ if (tg->funcs->set_vtg_params)
+ tg->funcs->set_vtg_params(tg, timing, program_fp2);
+}
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg;
+ int start_line = params->tg_setup_vertical_interrupt2_params.start_line;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp;
+ uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult;
+
+ if (dpp->funcs->dpp_set_hdr_multiplier)
+ dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult);
+}
+
+void hwss_program_det_size(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_size_params.hubbub;
+ unsigned int hubp_inst = params->program_det_size_params.hubp_inst;
+ unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb;
+
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb);
+}
+
+void hwss_program_det_segments(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_segments_params.hubbub;
+ unsigned int hubp_inst = params->program_det_segments_params.hubp_inst;
+ unsigned int det_size = params->program_det_segments_params.det_size;
+
+ if (hubbub->funcs->program_det_segments)
+ hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size);
+}
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp;
+ enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth;
+ enum signal_type signal = params->opp_set_dyn_expansion_params.signal;
+
+ if (opp->funcs->opp_set_dyn_expansion)
+ opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal);
+}
+
+void hwss_opp_program_fmt(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_fmt_params.opp;
+ struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping;
+
+ if (opp->funcs->opp_program_fmt)
+ opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping);
+}
+
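+/*
+ * Program OPP bit depth reduction, either with zeroed defaults or with
+ * parameters built from the stream via resource_build_bit_depth_reduction_params().
+ */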
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp;
+ bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params;
+ struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx;
+ struct bit_depth_reduction_params bit_depth_params;
+
+ if (use_default_params)
+ memset(&bit_depth_params, 0, sizeof(bit_depth_params));
+ else
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params);
+
+ if (opp->funcs->opp_program_bit_depth_reduction)
+ opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params);
+}
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp;
+ enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern;
+ enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth;
+ struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ?
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+ int width = params->opp_set_disp_pattern_generator_params.width;
+ int height = params->opp_set_disp_pattern_generator_params.height;
+ int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+ opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
+}
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_pipe_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_pipe)
+ dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+ struct abm *abm = params->set_abm_level_params.abm;
+ unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+ if (abm->funcs->set_abm_level)
+ abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_immediate_disable_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_abm_immediate_disable)
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+ struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+ struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+ int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+ bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+ opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_set_optimized_required_params.dc;
+ bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+ dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+ bool enable = params->hubbub_force_pstate_change_control_params.enable;
+ bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+ if (hubbub->funcs->force_pstate_change_control) {
+ hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+ /* Add delay when enabling pstate change control */
+ if (enable)
+ udelay(500);
+ }
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_params.tg;
+ struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+ if (tg->funcs->set_gsl)
+ tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+ int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+ uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+ if (tg->funcs->set_gsl_source_select)
+ tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
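+/*
+ * Poll the HUBP flip-pending status every polling_interval_us until it
+ * clears or timeout_us elapses.
+ */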
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp;
+ unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+}
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg;
+ unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_update_force_pstate(union block_sequence_params *params)
+{
+ struct dc *dc = params->update_force_pstate_params.dc;
+ struct dc_state *context = params->update_force_pstate_params.context;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+}
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub;
+
+ hubbub->funcs->apply_DEDCN21_147_wa(hubbub);
+}
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub;
+ bool allow = params->hubbub_allow_self_refresh_control_params.allow;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, allow);
+
+ if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied)
+ *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true;
+}
+
+void hwss_tg_get_frame_count(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_get_frame_count_params.tg;
+ unsigned int *frame_count = params->tg_get_frame_count_params.frame_count;
+
+ *frame_count = tg->funcs->get_frame_count(tg);
+}
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc;
+ int dwb_id = params->mpc_set_dwb_mux_params.dwb_id;
+ int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id;
+
+ if (mpc->funcs->set_dwb_mux)
+ mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id);
+}
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc;
+ unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id;
+
+ if (mpc->funcs->disable_dwb_mux)
+ mpc->funcs->disable_dwb_mux(mpc, dwb_id);
+}
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb;
+ struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params;
+ unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height;
+
+ if (mcif_wb->funcs->config_mcif_buf)
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height);
+}
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb;
+ struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params;
+
+ if (mcif_wb->funcs->config_mcif_arb)
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params);
+}
+
+void hwss_mcif_wb_enable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb;
+
+ if (mcif_wb->funcs->enable_mcif)
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+}
+
+void hwss_mcif_wb_disable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb;
+
+ if (mcif_wb->funcs->disable_mcif)
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+void hwss_dwbc_enable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_enable_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params;
+
+ if (dwb->funcs->enable)
+ dwb->funcs->enable(dwb, dwb_params);
+}
+
+void hwss_dwbc_disable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_disable_params.dwb;
+
+ if (dwb->funcs->disable)
+ dwb->funcs->disable(dwb);
+}
+
+void hwss_dwbc_update(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_update_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params;
+
+ if (dwb->funcs->update)
+ dwb->funcs->update(dwb, dwb_params);
+}
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_update_mall_sel_params.hubp;
+ uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel;
+ bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor;
+
+ if (hubp && hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor);
+}
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp;
+ bool enable = params->hubp_prepare_subvp_buffering_params.enable;
+
+ if (hubp && hubp->funcs->hubp_prepare_subvp_buffering)
+ hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable);
+}
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_en_params.hubp;
+ bool enable = params->hubp_set_blank_en_params.enable;
+
+ if (hubp && hubp->funcs->set_hubp_blank_en)
+ hubp->funcs->set_hubp_blank_en(hubp, enable);
+}
+
+void hwss_hubp_disable_control(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disable_control_params.hubp;
+ bool disable = params->hubp_disable_control_params.disable;
+
+ if (hubp && hubp->funcs->hubp_disable_control)
+ hubp->funcs->hubp_disable_control(hubp, disable);
+}
+
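+/*
+ * Toggle hubbub soft reset through the callback captured in the step
+ * params rather than through hubbub->funcs.
+ */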
+void hwss_hubbub_soft_reset(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub;
+ bool reset = params->hubbub_soft_reset_params.reset;
+
+ if (hubbub && params->hubbub_soft_reset_params.hubbub_soft_reset)
+ params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset);
+}
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_clk_cntl_params.hubp;
+ bool enable = params->hubp_clk_cntl_params.enable;
+
+ if (hubp && hubp->funcs->hubp_clk_cntl) {
+ hubp->funcs->hubp_clk_cntl(hubp, enable);
+ hubp->power_gated = !enable;
+ }
+}
+
+void hwss_hubp_init(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_init_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_init)
+ hubp->funcs->hubp_init(hubp);
+}
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp;
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default;
+ apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high;
+ apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low;
+
+ if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings)
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+}
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_flip_int_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_set_flip_int)
+ hubp->funcs->hubp_set_flip_int(hubp);
+}
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_dppclk_control_params.dpp;
+ bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div;
+ bool enable = params->dpp_dppclk_control_params.enable;
+
+ if (dpp && dpp->funcs->dpp_dppclk_control)
+ dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable);
+}
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->disable_phantom_crtc_params.tg;
+
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+}
+
+void hwss_dsc_pg_status(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dsc_pg_status_params.hws;
+ int dsc_inst = params->dsc_pg_status_params.dsc_inst;
+
+ if (hws && hws->funcs.dsc_pg_status)
+ params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst);
+}
+
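+/*
+ * The DSC tear-down steps below are skipped unless is_ungated points at
+ * true, i.e. the DSC block was reported as not power gated (typically by
+ * an earlier DSC_PG_STATUS step).
+ */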
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc;
+
+ if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated)
+ return;
+ if (*params->dsc_wait_disconnect_pending_clear_params.is_ungated == false)
+ return;
+
+ if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear)
+ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+}
+
+void hwss_dsc_disable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disable_params.dsc;
+
+ if (!params->dsc_disable_params.is_ungated)
+ return;
+ if (*params->dsc_disable_params.is_ungated == false)
+ return;
+
+ if (dsc && dsc->funcs->dsc_disable)
+ dsc->funcs->dsc_disable(dsc);
+}
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg;
+ int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst;
+
+ if (!params->dccg_set_ref_dscclk_params.is_ungated)
+ return;
+ if (*params->dccg_set_ref_dscclk_params.is_ungated == false)
+ return;
+
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc_inst);
+}
+
+void hwss_dpp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_pg_control_params.hws;
+ unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst;
+ bool power_on = params->dpp_pg_control_params.power_on;
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp_inst, power_on);
+}
+
+void hwss_hubp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->hubp_pg_control_params.hws;
+ unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst;
+ bool power_on = params->hubp_pg_control_params.power_on;
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp_inst, power_on);
+}
+
+void hwss_hubp_reset(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_reset_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_reset)
+ hubp->funcs->hubp_reset(hubp);
+}
+
+void hwss_dpp_reset(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_reset_params.dpp;
+
+ if (dpp && dpp->funcs->dpp_reset)
+ dpp->funcs->dpp_reset(dpp);
+}
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws;
+ unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst;
+ bool clock_on = params->dpp_root_clock_control_params.clock_on;
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on);
+}
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_ip_request_cntl_params.dc;
+ bool enable = params->dc_ip_request_cntl_params.enable;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.dc_ip_request_cntl)
+ hws->funcs.dc_ip_request_cntl(dc, enable);
+}
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg;
+ int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst;
+ int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz;
+
+ if (dccg && dccg->funcs->update_dpp_dto)
+ dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz);
+}
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_vtg_sel_params.hubp;
+ uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst;
+
+ if (hubp && hubp->funcs->hubp_vtg_sel)
+ hubp->funcs->hubp_vtg_sel(hubp, otg_inst);
+}
+
+void hwss_hubp_setup2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs;
+ union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync;
+ struct dc_crtc_timing *timing = params->hubp_setup2_params.timing;
+
+ if (hubp && hubp->funcs->hubp_setup2)
+ hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing);
+}
+
+void hwss_hubp_setup(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest;
+
+ if (hubp && hubp->funcs->hubp_setup)
+ hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest);
+}
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp;
+ bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req;
+
+ if (hubp && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, unbounded_req);
+}
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent2)
+ hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent)
+ hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+ enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+ if (dpp && dpp->funcs->set_cursor_matrix)
+ dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+ struct dc *dc = params->mpc_update_mpcc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.update_mpcc)
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_update_blending_params.mpc;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+ int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+ if (mpc && mpc->funcs->update_blending)
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc;
+ int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+ if (mpc && mpc->funcs->wait_for_idle)
+ mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+ struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+ struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+ int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+ int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+ if (mpc && mpc->funcs->insert_plane)
+ mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+ dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+ const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+ if (dpp && dpp->funcs->dpp_set_scaler)
+ dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+ const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+ const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+ if (hubp && hubp->funcs->mem_program_viewport)
+ hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params)
+{
+ struct dc *dc = params->abort_cursor_offload_update_params.dc;
+ struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;
+
+ if (dc && dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_attribute_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_attribute)
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_position_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_position)
+ dc->hwss.set_cursor_position(pipe_ctx);
+}
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_sdr_white_level_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+}
+
+void hwss_program_output_csc(union block_sequence_params *params)
+{
+ struct dc *dc = params->program_output_csc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx;
+ enum dc_color_space colorspace = params->program_output_csc_params.colorspace;
+ uint16_t *matrix = params->program_output_csc_params.matrix;
+ int opp_id = params->program_output_csc_params.opp_id;
+
+ if (dc && dc->hwss.program_output_csc)
+ dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);
+}
+
+void hwss_hubp_set_blank(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_params.hubp;
+ bool blank = params->hubp_set_blank_params.blank;
+
+ if (hubp && hubp->funcs->set_blank)
+ hubp->funcs->set_blank(hubp, blank);
+}
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp;
+
+ if (hubp && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
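+/*
+ * The hwss_add_*() helpers below each append one step to the block
+ * sequence, bounds-checked against MAX_HWSS_BLOCK_SEQUENCE_SIZE.
+ */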
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP;
+ seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update blending to block sequence
+ */
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC insert plane to block sequence
+ */
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC assert idle MPCC to block sequence
+ */
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set blank to block sequence
+ */
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC;
+ seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_INIT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP;
+ seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count;
+ (*seq_state->num_steps)++;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 039b176e086d..deb23d20bca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER dc->ctx->logger
@@ -44,20 +44,8 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
* yet match.
*/
if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
- if (dc_is_dp_signal(stream->signal)) {
- /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
- struct dc_link_settings link_settings = {0};
-
- stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings);
- if ((link_settings.link_rate >= LINK_RATE_LOW) &&
- link_settings.link_rate <= LINK_RATE_HIGH3) {
- is_dig_stream = true;
- break;
- }
- } else {
- is_dig_stream = true;
- break;
- }
+ is_dig_stream = true;
+ break;
}
}
}
@@ -534,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ enum engine_id encs_assigned[MAX_LINK_ENCODERS];
int i;
- for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ for (i = 0; i < MAX_LINK_ENCODERS; i++)
encs_assigned[i] = ENGINE_ID_UNKNOWN;
/* Add assigned encoders to list. */
@@ -559,17 +547,6 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
return link_enc;
}
-struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
- struct dc *dc,
- const struct dc_stream_state *stream)
-{
- struct link_encoder *link_enc;
-
- link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link);
-
- return link_enc;
-}
-
struct link_encoder *link_enc_cfg_get_link_enc(
const struct dc_link *link)
{
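
The hunk above only widens the bookkeeping array in link_enc_cfg_get_next_avail_link_enc from MAX_DIG_LINK_ENCODERS to MAX_LINK_ENCODERS; the allocation pattern itself is unchanged: mark every engine id that is already assigned, then return the first encoder whose id is not marked. A simplified sketch of that mark-then-scan search, with invented sizes and ids:

#include <stdio.h>

#define NUM_ENCODERS 6          /* stand-in for MAX_LINK_ENCODERS */
#define ENGINE_ID_UNKNOWN (-1)

/* Return the first encoder id that is not in the assigned list, or -1 if none is free. */
static int next_avail_encoder(const int *assigned, int assigned_count)
{
	int marks[NUM_ENCODERS];
	int i, j;

	for (i = 0; i < NUM_ENCODERS; i++)
		marks[i] = ENGINE_ID_UNKNOWN;

	/* Mark every engine id already handed out. */
	for (j = 0; j < assigned_count; j++)
		if (assigned[j] >= 0 && assigned[j] < NUM_ENCODERS)
			marks[assigned[j]] = assigned[j];

	/* The first unmarked slot is the next available encoder. */
	for (i = 0; i < NUM_ENCODERS; i++)
		if (marks[i] == ENGINE_ID_UNKNOWN)
			return i;

	return -1;
}

int main(void)
{
	int assigned[] = { 0, 1, 3 };

	printf("next free encoder: %d\n", next_avail_encoder(assigned, 3)); /* prints 2 */
	return 0;
}
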
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 457d60eeb486..9acd30019717 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -33,8 +33,9 @@
* dc.h with detail interface documentation, then add function implementation
* in this file which calls link functions.
*/
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_i2c.h"
+
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
{
if (link_index >= MAX_LINKS)
@@ -125,6 +126,14 @@ uint32_t dc_link_bandwidth_kbps(
return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ return link->dc->link_srv->dp_required_hblank_size_bytes(link,
+ audio_params);
+}
+
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
dc->link_srv->get_cur_res_map(dc, map);
@@ -142,6 +151,12 @@ bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
return link->dc->link_srv->update_dsc_config(pipe_ctx);
}
+struct ddc_service *
+dc_get_oem_i2c_device(struct dc *dc)
+{
+ return dc->res_pool->oem_device;
+}
+
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address)
@@ -356,15 +371,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
return link->dc->link_srv->dp_should_enable_fec(link);
}
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw)
{
- return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
-}
-
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
-{
- link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+ link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
}
bool dc_link_check_link_loss_status(
@@ -506,7 +516,15 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
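
dc_link_exports.c stays a thin facade: each public dc_link_* entry point forwards into the link service operations hanging off dc->link_srv, which is why this patch can add dc_link_required_hblank_size_bytes() and dc_link_get_alpm_support() as one-line forwarders. A minimal sketch of that vtable-delegation pattern follows; the stand-in types and the bandwidth arithmetic are illustrative only, not the driver's exact accounting.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the link service vtable the exports delegate to. */
struct link_service {
	uint32_t (*dp_link_bandwidth_kbps)(int link_rate, int lane_count);
};

struct dc_link_model {
	const struct link_service *srv;
	int link_rate;     /* illustrative: in units of 0.27 Gbps */
	int lane_count;
};

/* Example implementation behind the vtable: rough 8b/10b payload accounting. */
static uint32_t bw_kbps(int link_rate, int lane_count)
{
	/* 0.27 Gbps = 270,000 kbps per unit; 8b/10b leaves 80% for payload. */
	return (uint32_t)link_rate * 270000u * (uint32_t)lane_count * 8u / 10u;
}

/* The exported call is just a forwarder, like dc_link_bandwidth_kbps() above. */
static uint32_t export_link_bandwidth_kbps(const struct dc_link_model *link)
{
	return link->srv->dp_link_bandwidth_kbps(link->link_rate, link->lane_count);
}

int main(void)
{
	static const struct link_service srv = { .dp_link_bandwidth_kbps = bw_kbps };
	struct dc_link_model link = { .srv = &srv, .link_rate = 10, .lane_count = 4 };

	printf("%u kbps\n", export_link_bandwidth_kbps(&link));
	return 0;
}
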
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 626f75b6ad00..848c267ef11e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,7 +40,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
@@ -76,6 +76,7 @@
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
#include "dcn351/dcn351_resource.h"
+#include "dcn36/dcn36_resource.h"
#include "dcn401/dcn401_resource.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dc_spl_translate.h"
@@ -94,11 +95,44 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
+static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx,
+ int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe)
+{
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+
+ if (current_snapshot->line_count >= MAX_PIPES)
+ return;
+
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst;
+
+ current_snapshot->line_count++;
+}
+
+static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state)
+{
+ // Move to next snapshot slot (circular buffer)
+ dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
+
+ // Clear the new snapshot
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+ memset(current_snapshot, 0, sizeof(*current_snapshot));
+
+ // Set metadata
+ current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx);
+ current_snapshot->stream_count = state->stream_count;
+ current_snapshot->phantom_stream_count = state->phantom_stream_count;
+}
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -164,7 +198,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
@@ -204,6 +244,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_3_5;
if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_51;
+ if (ASICREV_IS_DCN36(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_6;
break;
case AMDGPU_FAMILY_GC_12_0_0:
if (ASICREV_IS_GC_12_0_1_A0(asic_id.hw_internal_rev) ||
@@ -320,6 +362,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_51:
res_pool = dcn351_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_3_6:
+ res_pool = dcn36_create_resource_pool(init_data, dc);
+ break;
case DCN_VERSION_4_01:
res_pool = dcn401_create_resource_pool(init_data, dc);
break;
@@ -435,6 +480,14 @@ bool resource_construct(
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
+
+ for (i = 0; i < caps->num_analog_stream_encoder; i++) {
+ pool->stream_enc[caps->num_stream_encoder + i] =
+ create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
+ if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
+ DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
+ pool->stream_enc_count++;
+ }
}
pool->hpo_dp_stream_enc_count = 0;
@@ -941,6 +994,17 @@ static void calculate_adjust_recout_for_visual_confirm(struct pipe_ctx *pipe_ctx
*base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
}
+static void reverse_adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ int dpp_offset, base_offset;
+
+ calculate_adjust_recout_for_visual_confirm(pipe_ctx, &base_offset,
+ &dpp_offset);
+ recout->height += base_offset;
+ recout->height += dpp_offset;
+}
+
static void adjust_recout_for_visual_confirm(struct rect *recout,
struct pipe_ctx *pipe_ctx)
{
@@ -1325,32 +1389,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
data->viewport_c.y += src.y / vpc_div;
}
-static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
-{
- uint32_t refresh_rate;
- struct dc *dc = stream->ctx->dc;
-
- refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
- stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
- refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
- refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
-
- /* If there's any stream that fits the SubVP high refresh criteria,
- * we must return true. This is because cursor updates are asynchronous
- * with full updates, so we could transition into a SubVP config and
- * remain in HW cursor mode if there's no cursor update which will
- * then cause corruption.
- */
- if ((refresh_rate >= 120 && refresh_rate <= 175 &&
- stream->timing.v_addressable >= 1080 &&
- stream->timing.v_addressable <= 2160) &&
- (dc->current_state->stream_count > 1 ||
- (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
- return true;
-
- return false;
-}
-
static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
enum dp_test_pattern test_pattern)
{
@@ -1455,7 +1493,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
- if (!plane_state->dst_rect.width ||
+ if (!plane_state ||
+ !plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
@@ -1641,6 +1680,62 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return res;
}
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe, *split_pipe;
+ struct rect r1 = pipe_ctx->plane_res.scl_data.recout;
+ int r1_right, r1_bottom;
+ int cur_layer = pipe_ctx->plane_state->layer_index;
+
+ reverse_adjust_recout_for_visual_confirm(&r1, pipe_ctx);
+ r1_right = r1.x + r1.width;
+ r1_bottom = r1.y + r1.height;
+
+ /**
+ * Disable the cursor if there's another pipe above this with a
+ * plane that contains this pipe's viewport to prevent double cursor
+ * and incorrect scaling artifacts.
+ */
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+ struct rect r2;
+ int r2_right, r2_bottom;
+ // Skip invisible layer and pipe-split plane on same layer
+ if (!test_pipe->plane_state ||
+ !test_pipe->plane_state->visible ||
+ test_pipe->plane_state->layer_index == cur_layer)
+ continue;
+
+ r2 = test_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2, test_pipe);
+ r2_right = r2.x + r2.width;
+ r2_bottom = r2.y + r2.height;
+
+ /**
+ * There is another half plane on same layer because of
+ * pipe-split, merge together per same height.
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ struct rect r2_half;
+
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2_half, split_pipe);
+ r2.x = min(r2_half.x, r2.x);
+ r2.width = r2.width + r2_half.width;
+ r2_right = r2.x + r2.width;
+ r2_bottom = min(r2_bottom, r2_half.y + r2_half.height);
+ break;
+ }
+
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_right <= r2_right && r1_bottom <= r2_bottom)
+ return true;
+ }
+
+ return false;
+}
+
enum dc_status resource_build_scaling_params_for_context(
const struct dc *dc,
@@ -2095,7 +2190,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right +
- otg_master->hblank_borrow;
+ otg_master->dsc_padding_params.dsc_hactive_padding;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -2250,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
int stream_idx, int slice_idx, int plane_idx, int slice_count,
- bool is_primary)
+ bool is_primary, bool is_phantom_pipe)
{
DC_LOGGER_INIT(dc->ctx->logger);
+ // alongside the textual log, capture the topology data into the snapshot history
if (slice_idx == 0 && plane_idx == 0 && is_primary) {
/* case 0 (OTG master pipe with plane) */
DC_LOG_DC(" | plane%d slice%d stream%d|",
@@ -2262,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == 0 && plane_idx == -1) {
/* case 1 (OTG master pipe without plane) */
DC_LOG_DC(" | slice%d stream%d|",
@@ -2270,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->stream_res.opp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
/* case 2 (OPP head pipe with plane) */
DC_LOG_DC(" | plane%d slice%d | |",
@@ -2277,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
DC_LOG_DC(" |DPP%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == -1) {
/* case 3 (OPP head pipe without plane) */
DC_LOG_DC(" | slice%d | |", slice_idx);
DC_LOG_DC(" |DPG%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == slice_count - 1) {
/* case 4 (DPP pipe in last slice) */
DC_LOG_DC(" | plane%d | |", plane_idx);
DC_LOG_DC(" |DPP%d----| |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else {
/* case 5 (DPP pipe not in last slice) */
DC_LOG_DC(" | plane%d | | |", plane_idx);
DC_LOG_DC(" |DPP%d----| | |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
}
}
static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
- struct pipe_ctx *otg_master, int stream_idx)
+ struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -2323,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
resource_log_pipe(dc, dpp_pipes[dpp_idx],
stream_idx, slice_idx,
plane_idx, slice_count,
- is_primary);
+ is_primary, is_phantom_pipe);
}
} else {
resource_log_pipe(dc, opp_heads[slice_idx],
stream_idx, slice_idx, plane_idx,
- slice_count, true);
+ slice_count, true, is_phantom_pipe);
}
}
@@ -2359,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
struct pipe_ctx *otg_master;
int stream_idx, phantom_stream_idx;
DC_LOGGER_INIT(dc->ctx->logger);
+ bool is_phantom_pipe = false;
+
+ // Start a new snapshot for this topology update
+ start_new_topology_snapshot(dc, state);
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
@@ -2372,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
if (state->phantom_stream_count > 0) {
+ is_phantom_pipe = true;
DC_LOG_DC(" | (phantom pipes) |");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
@@ -2387,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2616,6 +2741,185 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
}
}
+static inline int find_acquired_dio_link_enc_for_link(
+ const struct resource_context *res_ctx,
+ const struct dc_link *link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->dio_link_enc_ref_cnts); i++)
+ if (res_ctx->dio_link_enc_ref_cnts[i] > 0 &&
+ res_ctx->dio_link_enc_to_link_idx[i] == link->link_index)
+ return i;
+
+ return -1;
+}
+
+static inline int find_fixed_dio_link_enc(const struct dc_link *link)
+{
+ /* the 8b10b dp phy can only use fixed link encoder */
+ return link->eng_id;
+}
+
+static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
+ const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
+{
+ int i, j = -1;
+ int stream_enc_inst = -1;
+ int enc_count = pool->dig_link_enc_count;
+
+ /* Find stream encoder instance for the stream */
+ if (stream) {
+ for (i = 0; i < pool->pipe_count; i++) {
+ if ((res_ctx->pipe_ctx[i].stream == stream) &&
+ (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
+ break;
+ }
+ }
+ }
+
+ /* Assign dpia preferred > stream enc instance > available */
+ for (i = 0; i < enc_count; i++) {
+ if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
+ if (j == -1)
+ j = i;
+
+ if (link->dpia_preferred_eng_id == i) {
+ j = i;
+ break;
+ }
+
+ if (stream_enc_inst == i) {
+ j = stream_enc_inst;
+ }
+ }
+ }
+ return j;
+}
+
+static inline void acquire_dio_link_enc(
+ struct resource_context *res_ctx,
+ unsigned int link_index,
+ int enc_index)
+{
+ res_ctx->dio_link_enc_to_link_idx[enc_index] = link_index;
+ res_ctx->dio_link_enc_ref_cnts[enc_index] = 1;
+}
+
+static inline void retain_dio_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ res_ctx->dio_link_enc_ref_cnts[enc_index]++;
+}
+
+static inline void release_dio_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ ASSERT(res_ctx->dio_link_enc_ref_cnts[enc_index] > 0);
+ res_ctx->dio_link_enc_ref_cnts[enc_index]--;
+}
+
+static bool is_dio_enc_acquired_by_other_link(const struct dc_link *link,
+ int enc_index,
+ int *link_index)
+{
+ const struct dc *dc = link->dc;
+ const struct resource_context *res_ctx = &dc->current_state->res_ctx;
+
+ /* pass the link_index that acquired the enc_index */
+ if (res_ctx->dio_link_enc_ref_cnts[enc_index] > 0 &&
+ res_ctx->dio_link_enc_to_link_idx[enc_index] != link->link_index) {
+ *link_index = res_ctx->dio_link_enc_to_link_idx[enc_index];
+ return true;
+ }
+
+ return false;
+}
+
+static void swap_dio_link_enc_to_muxable_ctx(struct dc_state *context,
+ const struct resource_pool *pool,
+ int new_encoder,
+ int old_encoder)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ int stream_count = context->stream_count;
+ int i = 0;
+
+ res_ctx->dio_link_enc_ref_cnts[new_encoder] = res_ctx->dio_link_enc_ref_cnts[old_encoder];
+ res_ctx->dio_link_enc_to_link_idx[new_encoder] = res_ctx->dio_link_enc_to_link_idx[old_encoder];
+ res_ctx->dio_link_enc_ref_cnts[old_encoder] = 0;
+
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+
+ if (pipe_ctx && pipe_ctx->link_res.dio_link_enc == pool->link_encoders[old_encoder])
+ pipe_ctx->link_res.dio_link_enc = pool->link_encoders[new_encoder];
+ }
+}
+
+static bool add_dio_link_enc_to_ctx(const struct dc *dc,
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ int enc_index;
+
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ retain_dio_link_enc(res_ctx, enc_index);
+ } else {
+ if (stream->link->is_dig_mapping_flexible)
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
+ else {
+ int link_index = 0;
+
+ enc_index = find_fixed_dio_link_enc(stream->link);
+ /* Fixed mapping link can only use its fixed link encoder.
+ * If the encoder is acquired by other link then get a new free encoder and swap the new
+ * one into the acquiring link.
+ */
+ if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
+
+ if (new_enc_index >= 0)
+ swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
+ else
+ return false;
+ }
+ }
+
+ if (enc_index >= 0)
+ acquire_dio_link_enc(res_ctx, stream->link->link_index, enc_index);
+ }
+
+ if (enc_index >= 0)
+ pipe_ctx->link_res.dio_link_enc = pool->link_encoders[enc_index];
+
+ return pipe_ctx->link_res.dio_link_enc != NULL;
+}
+
+static void remove_dio_link_enc_from_ctx(struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index = -1;
+
+ if (stream->link)
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ release_dio_link_enc(res_ctx, enc_index);
+ pipe_ctx->link_res.dio_link_enc = NULL;
+ }
+}
+
static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context)
{
int i;
@@ -2663,6 +2967,10 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
remove_hpo_dp_link_enc_from_ctx(
&context->res_ctx, otg_master, stream);
}
+
+ if (stream->ctx->dc->config.unify_link_enc_assignment)
+ remove_dio_link_enc_from_ctx(&context->res_ctx, otg_master, stream);
+
if (otg_master->stream_res.audio)
update_audio_usage(
&context->res_ctx,
@@ -2677,6 +2985,7 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
if (pool->funcs->remove_stream_from_ctx)
pool->funcs->remove_stream_from_ctx(
stream->ctx->dc, context, stream);
+
memset(otg_master, 0, sizeof(*otg_master));
}
@@ -3388,10 +3697,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
@@ -3525,16 +3837,22 @@ static int acquire_resource_from_hw_enabled_state(
return -1;
}
-static void mark_seamless_boot_stream(
- const struct dc *dc,
- struct dc_stream_state *stream)
+static void mark_seamless_boot_stream(const struct dc *dc,
+ struct dc_stream_state *stream)
{
struct dc_bios *dcb = dc->ctx->dc_bios;
- if (dc->config.allow_seamless_boot_optimization &&
- !dcb->funcs->is_accelerated_mode(dcb)) {
- if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
- stream->apply_seamless_boot_optimization = true;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (stream->apply_seamless_boot_optimization)
+ return;
+ if (!dc->config.allow_seamless_boot_optimization)
+ return;
+ if (dcb->funcs->is_accelerated_mode(dcb))
+ return;
+ if (dc_validate_boot_timing(dc, stream->sink, &stream->timing)) {
+ stream->apply_seamless_boot_optimization = true;
+ DC_LOG_DC("Marked stream for seamless boot optimization\n");
}
}
@@ -3645,6 +3963,7 @@ enum dc_status resource_map_pool_resources(
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
bool acquired = false;
+ bool is_dio_encoder = true;
calculate_phy_pix_clks(stream);
@@ -3692,6 +4011,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
+
+ dc->link_srv->dp_decide_tunnel_settings(stream,
+ &pipe_ctx->link_config.dp_tunnel_settings);
+
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -3710,10 +4033,16 @@ enum dc_status resource_map_pool_resources(
}
}
+ if (dc->config.unify_link_enc_assignment && is_dio_encoder)
+ if (!add_dio_link_enc_to_ctx(dc, context, pool, pipe_ctx, stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ stream->audio_info.mode_count &&
+ (stream->audio_info.flags.all ||
+ (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
@@ -3826,7 +4155,7 @@ static bool add_all_planes_for_stream(
* @set: An array of dc_validation_set with all the current streams reference
* @set_count: Total of streams
* @context: New context
- * @fast_validate: Enable or disable fast validation
+ * @validate_mode: identify the validation mode
*
* This function updates the potential new stream in the context object. It
* creates multiple lists for the add, remove, and unchanged streams. In
@@ -3841,7 +4170,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
@@ -4010,7 +4339,12 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
- res = dc_validate_global_state(dc, context, fast_validate);
+ /* clear subvp cursor limitations */
+ for (i = 0; i < context->stream_count; i++) {
+ dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
+ }
+
+ res = dc_validate_global_state(dc, context, validate_mode);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
@@ -4027,39 +4361,33 @@ fail:
return res;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
+#endif /* CONFIG_DRM_AMD_DC_FP */
+
/**
- * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding.
* @pipe_ctx: Pointer to the pipe context structure.
*
- * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * This function calculates the timing parameters for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
- * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
- * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
+ * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the
+ * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding
* value to 0.
*/
-static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx)
{
- uint32_t hactive;
- uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
+ pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0;
+ pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0;
- if (stream->timing.flags.DSC) {
- hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-
- /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
- if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
- ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
- pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+ if (stream)
+ pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz;
- if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
- pipe_ctx->hblank_borrow = 0;
- }
- }
}
/**
@@ -4067,7 +4395,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
*
* @dc: dc struct for this driver
* @new_ctx: state to be validated
- * @fast_validate: set to true if only yes/no to support matters
+ * @validate_mode: identify the validation mode
*
* Checks hardware resource availability and bandwidth requirement.
*
@@ -4077,7 +4405,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -4102,7 +4430,7 @@ enum dc_status dc_validate_global_state(
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
- decide_hblank_borrow(pipe_ctx);
+ calculate_timing_params_for_dsc_with_padding(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
@@ -4136,8 +4464,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode);
return result;
}
@@ -4178,8 +4505,14 @@ static void set_avi_info_frame(
unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
enum dc_timing_3d_format format;
+ if (stream->avi_infopacket.valid) {
+ *info_packet = stream->avi_infopacket;
+ return;
+ }
+
memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
@@ -4243,7 +4576,7 @@ static void set_avi_info_frame(
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
break;
@@ -4257,7 +4590,7 @@ static void set_avi_info_frame(
break;
}
- if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
+ if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED &&
stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
@@ -4478,7 +4811,7 @@ static void set_hfvs_info_packet(
static void adaptive_sync_override_dp_info_packets_sdp_line_num(
const struct dc_crtc_timing *timing,
struct enc_sdp_line_num *sdp_line_num,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
uint32_t asic_blank_start = 0;
uint32_t asic_blank_end = 0;
@@ -4493,8 +4826,8 @@ static void adaptive_sync_override_dp_info_packets_sdp_line_num(
asic_blank_end = (asic_blank_start - tg->v_border_bottom -
tg->v_addressable - tg->v_border_top);
- if (pipe_dlg_param->vstartup_start > asic_blank_end) {
- v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ if (vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (vstartup_start - asic_blank_end));
sdp_line_num->adaptive_sync_line_num_valid = true;
sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
} else {
@@ -4507,7 +4840,7 @@ static void set_adaptive_sync_info_packet(
struct dc_info_packet *info_packet,
const struct dc_stream_state *stream,
struct encoder_info_frame *info_frame,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
if (!stream->adaptive_sync_infopacket.valid)
return;
@@ -4515,7 +4848,7 @@ static void set_adaptive_sync_info_packet(
adaptive_sync_override_dp_info_packets_sdp_line_num(
&stream->timing,
&info_frame->sdp_line_num,
- pipe_dlg_param);
+ vstartup_start);
*info_packet = stream->adaptive_sync_infopacket;
}
@@ -4548,6 +4881,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ unsigned int vstartup_start = 0;
/* default all packets to invalid */
info->avi.valid = false;
@@ -4561,6 +4895,9 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
+ if (pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe)
+ vstartup_start = pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
+
/* HDMi and DP have different info packets*/
if (dc_is_hdmi_signal(signal)) {
set_avi_info_frame(&info->avi, pipe_ctx);
@@ -4582,7 +4919,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_adaptive_sync_info_packet(&info->adaptive_sync,
pipe_ctx->stream,
info,
- &pipe_ctx->pipe_dlg_param);
+ vstartup_start);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -4676,7 +5013,10 @@ bool pipe_need_reprogram(
return true;
/* DIG link encoder resource assignment for stream changed. */
- if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
+ if (pipe_ctx_old->stream->ctx->dc->config.unify_link_enc_assignment) {
+ if (pipe_ctx_old->link_res.dio_link_enc != pipe_ctx->link_res.dio_link_enc)
+ return true;
+ } else if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
bool need_reprogram = false;
struct dc *dc = pipe_ctx_old->stream->ctx->dc;
struct link_encoder *link_enc_prev =
@@ -4942,6 +5282,28 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
+struct link_encoder *get_temp_dio_link_enc(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *const pool,
+ const struct dc_link *link)
+{
+ struct link_encoder *link_enc = NULL;
+ int enc_index;
+
+ if (link->is_dig_mapping_flexible)
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, link);
+ else
+ enc_index = link->eng_id;
+
+ if (enc_index < 0)
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
+
+ if (enc_index >= 0)
+ link_enc = pool->link_encoders[enc_index];
+
+ return link_enc;
+}
+
static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc(
const struct resource_context *res_ctx,
const struct resource_pool *const pool,
@@ -4971,11 +5333,17 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
- dc->res_pool, link);
+ link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
return false;
+ } else if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ dc->config.unify_link_enc_assignment) {
+ link_res->dio_link_enc = get_temp_dio_link_enc(res_ctx,
+ dc->res_pool, link);
+ if (!link_res->dio_link_enc)
+ return false;
}
+
return true;
}
@@ -5247,26 +5615,24 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
}
+ if (pipe_ctx->link_res.dio_link_enc == NULL && dc->config.unify_link_enc_assignment)
+ if (!add_dio_link_enc_to_ctx(dc, context, dc->res_pool, pipe_ctx, pipe_ctx->stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+
return DC_OK;
}
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
{
- if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
- return true;
- if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
-
- return false;
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
-struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
- return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+ if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
+ context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
+
+ return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
@@ -5288,6 +5654,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
+ dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
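
One self-contained piece of the dc_resource.c changes above is the pipe-topology history: each topology update rotates to the next slot of a fixed-size ring of snapshots, clears it, stamps it with metadata, and then appends one bounded record per pipe. A standalone sketch of that ring-buffer capture, with invented sizes and a trimmed-down record:

#include <string.h>
#include <stdio.h>

#define MAX_SNAPSHOTS 4          /* stand-in for MAX_TOPOLOGY_SNAPSHOTS */
#define MAX_LINES     6          /* stand-in for MAX_PIPES */

struct topo_line {
	int plane_idx, slice_idx, stream_idx;
	int dpp_inst, opp_inst, tg_inst;
};

struct topo_snapshot {
	unsigned long timestamp_us;
	int stream_count;
	int line_count;
	struct topo_line lines[MAX_LINES];
};

struct topo_history {
	struct topo_snapshot snapshots[MAX_SNAPSHOTS];
	int current;
};

/* Rotate to the next slot of the ring, wipe it, and stamp metadata. */
static struct topo_snapshot *start_snapshot(struct topo_history *h,
					    unsigned long now_us, int stream_count)
{
	struct topo_snapshot *snap;

	h->current = (h->current + 1) % MAX_SNAPSHOTS;
	snap = &h->snapshots[h->current];
	memset(snap, 0, sizeof(*snap));
	snap->timestamp_us = now_us;
	snap->stream_count = stream_count;
	return snap;
}

/* Append one pipe record, silently dropping lines past the fixed bound. */
static void capture_line(struct topo_snapshot *snap, struct topo_line line)
{
	if (snap->line_count >= MAX_LINES)
		return;
	snap->lines[snap->line_count++] = line;
}

int main(void)
{
	struct topo_history history = { .current = 0 };
	struct topo_snapshot *snap = start_snapshot(&history, 1000, 1);

	capture_line(snap, (struct topo_line){ 0, 0, 0, 0, 0, 0 });
	capture_line(snap, (struct topo_line){ 1, 1, 0, 1, 0, 0 });
	printf("snapshot %d has %d lines\n", history.current, snap->line_count);
	return 0;
}
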
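The same file also introduces unified DIO link-encoder assignment: a per-encoder reference count plus the owning link index, with acquire/retain/release helpers and a free-encoder search that prefers the DPIA-preferred engine, then the stream encoder's instance, then any free slot. The sketch below models only the ref-count bookkeeping under simplified, made-up names; the preference ordering and the encoder-swap path are left out.

#include <assert.h>
#include <stdio.h>

#define NUM_DIO_ENCODERS 5       /* stand-in for the pool's dig_link_enc_count */

struct enc_ctx {
	int ref_cnts[NUM_DIO_ENCODERS];
	int owner_link[NUM_DIO_ENCODERS];
};

/* Find the encoder already acquired by this link, if any. */
static int find_acquired(const struct enc_ctx *ctx, int link_index)
{
	for (int i = 0; i < NUM_DIO_ENCODERS; i++)
		if (ctx->ref_cnts[i] > 0 && ctx->owner_link[i] == link_index)
			return i;
	return -1;
}

/* Find any free encoder (the driver layers DPIA/stream-enc preferences on top). */
static int find_free(const struct enc_ctx *ctx)
{
	for (int i = 0; i < NUM_DIO_ENCODERS; i++)
		if (ctx->ref_cnts[i] == 0)
			return i;
	return -1;
}

static void acquire(struct enc_ctx *ctx, int link_index, int enc)
{
	ctx->owner_link[enc] = link_index;
	ctx->ref_cnts[enc] = 1;
}

static void retain(struct enc_ctx *ctx, int enc)
{
	ctx->ref_cnts[enc]++;
}

static void release(struct enc_ctx *ctx, int enc)
{
	assert(ctx->ref_cnts[enc] > 0);
	ctx->ref_cnts[enc]--;
}

int main(void)
{
	struct enc_ctx ctx = { { 0 } };
	int link = 2;
	int enc = find_acquired(&ctx, link);

	if (enc < 0) {
		enc = find_free(&ctx);        /* first use on this link: take a free encoder */
		acquire(&ctx, link, enc);
	} else {
		retain(&ctx, enc);            /* another stream on the same link shares it */
	}
	printf("link %d uses encoder %d (refs=%d)\n", link, enc, ctx.ref_cnts[enc]);
	release(&ctx, enc);
	return 0;
}
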
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e006f816ff2f..2de8ef4a58ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include "dc_types.h"
#include "core_types.h"
#include "core_status.h"
#include "dc_state.h"
@@ -34,8 +35,8 @@
#include "link_enc_cfg.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_wrapper.h"
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_wrapper.h"
+#include "dml2_0/dml2_internal_types.h"
#endif
#define DC_LOGGER \
@@ -193,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
struct dc_state *state;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-#endif
state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
@@ -210,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
- dml2_opt->use_clock_dc_limits = false;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
dc_state_release(state);
return NULL;
}
- dml2_opt->use_clock_dc_limits = true;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
@@ -433,6 +427,8 @@ enum dc_status dc_state_remove_stream(
return DC_ERROR_UNEXPECTED;
}
+ dc_stream_release_3dlut_for_stream(dc, stream);
+
dc_stream_release(state->streams[i]);
state->stream_count--;
@@ -483,9 +479,9 @@ bool dc_state_add_plane(
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ } else if (stream_status->plane_count == MAX_SURFACES) {
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
+ plane_state, MAX_SURFACES);
goto out;
} else if (!otg_master_pipe) {
goto out;
@@ -600,7 +596,7 @@ bool dc_state_rem_all_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == stream) {
@@ -812,8 +808,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
if (phantom_stream_status) {
phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false;
+ phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false;
}
+ dc_state_set_stream_subvp_cursor_limit(main_stream, state, true);
+
return res;
}
@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == phantom_stream) {
@@ -939,13 +939,20 @@ void dc_state_release_phantom_streams_and_planes(
const struct dc *dc,
struct dc_state *state)
{
+ unsigned int phantom_count;
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
int i;
- for (i = 0; i < state->phantom_stream_count; i++)
- dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+ phantom_count = state->phantom_stream_count;
+ memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_stream(dc, state, phantom_streams[i]);
- for (i = 0; i < state->phantom_plane_count; i++)
- dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+ phantom_count = state->phantom_plane_count;
+ memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_plane(dc, state, phantom_planes[i]);
}
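
dc_state_release_phantom_stream()/dc_state_release_phantom_plane() remove entries from the very arrays being iterated, so the rewritten loops above snapshot the pointers into local arrays first and release from the copy. A small self-contained illustration of the snapshot-then-release pattern, with simplified stand-in types (not the real dc_state):

#include <string.h>

#define MAX_ITEMS 6

struct pool {
	void *items[MAX_ITEMS];
	int count;
};

/* release() compacts the pool, so iterating pool->items directly while
 * releasing would skip entries as they shift down. */
static void release(struct pool *p, void *item)
{
	int i;

	for (i = 0; i < p->count; i++) {
		if (p->items[i] == item) {
			memmove(&p->items[i], &p->items[i + 1],
				(p->count - i - 1) * sizeof(p->items[0]));
			p->items[--p->count] = NULL;
			return;
		}
	}
}

static void release_all(struct pool *p)
{
	void *snapshot[MAX_ITEMS];
	int count = p->count;
	int i;

	/* Snapshot first, then release from the copy; the pool can mutate
	 * freely underneath without confusing the loop. */
	memcpy(snapshot, p->items, sizeof(snapshot));
	for (i = 0; i < count; i++)
		release(p, snapshot[i]);
}
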
struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
@@ -977,3 +984,94 @@ bool dc_state_is_fams2_in_use(
return is_fams2_in_use;
}
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.subvp_limit_cursor_size = limit;
+ }
+}
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+ }
+
+ return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+ }
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+ }
+
+ return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool can_clear_limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+ (stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+ stream->hw_cursor_req ||
+ !stream_status->mall_stream_config.subvp_limit_cursor_size ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+ }
+
+ return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+ uint32_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 701b7e4f2920..129cd5f84983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -37,6 +37,8 @@
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+#ifndef MAX
#define MAX(x, y) ((x > y) ? x : y)
#endif
@@ -199,7 +201,8 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
- if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign &&
+ !new_stream->ctx->dc->config.unify_link_enc_assignment)
new_stream->link_enc = NULL;
kref_init(&new_stream->refcount);
@@ -221,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
return dc_state_get_stream_status(dc->current_state, stream);
}
+const struct dc_stream_status *dc_stream_get_status_const(
+ const struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+
+ return dc_state_get_stream_status(dc->current_state, stream);
+}
+
void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream)
@@ -228,6 +239,7 @@ void program_cursor_attributes(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -242,9 +254,14 @@ void program_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+ }
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -252,23 +269,32 @@ void program_cursor_attributes(
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
}
if (pipe_to_program) {
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ }
}
}
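
With cursor offload enabled, the begin/update/commit hooks replace the cursor_lock(true)/.../cursor_lock(false) bracket, and each hook is only taken when the hw sequencer actually provides it. A compact sketch of this optional-hook-with-fallback shape, using hypothetical function-pointer names rather than the real hwss interface:

#include <stdbool.h>
#include <stddef.h>

struct cursor_hw_hooks {
	/* optional offload path; NULL when the ASIC does not support it */
	void (*begin_offload_update)(void);
	void (*commit_offload_update)(void);
	/* always-present fallback */
	void (*cursor_lock)(bool lock);
};

static void program_cursor_sketch(const struct cursor_hw_hooks *hooks,
				  bool offload_enabled)
{
	bool use_offload = offload_enabled &&
			   hooks->begin_offload_update &&
			   hooks->commit_offload_update;

	if (use_offload)
		hooks->begin_offload_update();
	else
		hooks->cursor_lock(true);

	/* ... program the cursor registers for each pipe here ... */

	if (use_offload)
		hooks->commit_offload_update();
	else
		hooks->cursor_lock(false);
}
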
/*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
*/
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
const struct dc_cursor_attributes *attributes)
{
- struct dc *dc;
+ const struct dc *dc;
+
+ unsigned int max_cursor_size;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -286,24 +312,41 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
- * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
- * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+ /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+	 * Therefore, if the cursor is larger than this, fall back to SW cursor.
*/
- if (dc->debug.allow_sw_cursor_fallback &&
- attributes->height * attributes->width * 4 > 16384 &&
- !stream->hw_cursor_req) {
- if (check_subvp_sw_cursor_fallback_req(dc, stream))
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream);
+ max_cursor_size = max_cursor_size * max_cursor_size * 4;
+
+ if (attributes->height * attributes->width * 4 > max_cursor_size) {
return false;
+ }
}
- stream->cursor_attributes = *attributes;
-
return true;
}
+/*
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ bool result = false;
+
+ if (!stream)
+ return false;
+
+ if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
+ stream->cursor_attributes = *attributes;
+ result = true;
+ }
+
+ return result;
+}
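
The check is now split from the assignment: dc_stream_check_cursor_attributes() compares the requested cursor footprint against a per-ASIC budget reported by get_max_hw_cursor_size() (squared, times 4 bytes per pixel), replacing the old hardcoded 16384-byte limit, and dc_stream_set_cursor_attributes() only copies the attributes once the check passes. A minimal standalone sketch of the size math, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the new check: the resource pool reports the
 * largest HW cursor side (in pixels) it can tolerate, and the byte budget is
 * side * side * 4 (ARGB8888). */
static bool hw_cursor_fits(uint32_t max_side_px,
			   uint32_t cursor_w, uint32_t cursor_h)
{
	uint64_t budget = (uint64_t)max_side_px * max_side_px * 4;
	uint64_t needed = (uint64_t)cursor_w * cursor_h * 4;

	return needed <= budget;
}

/* Example: a 64-pixel limit reproduces the old hardcoded 16384-byte budget,
 * so a 64x64 cursor passes while a 128x128 ARGB cursor (65536 bytes) would
 * fall back to the SW cursor path. */
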
+
bool dc_stream_program_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
@@ -311,7 +354,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
@@ -340,6 +386,7 @@ void program_cursor_position(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -358,16 +405,27 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
+
if (dc->ctx->dmub_srv)
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ }
}
bool dc_stream_set_cursor_position(
@@ -549,6 +607,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return true;
}
+/**
+ * dc_stream_remove_writeback() - Disables writeback and removes writeback info.
+ * @dc: Display core control structure.
+ * @stream: Display core stream state.
+ * @dwb_pipe_inst: Display writeback pipe.
+ *
+ * Return: true on success, false otherwise.
+ */
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
@@ -671,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *dc = stream->ctx->dc;
- struct resource_context *res_ctx =
- &dc->current_state->res_ctx;
+ struct dc *dc;
+ struct resource_context *res_ctx;
+
+ if (!stream->ctx)
+ return false;
+
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
dc_exit_ips_for_hw_access(dc);
@@ -821,14 +892,83 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
DC_LOG_DC(
- "\tdispname: %s signal: %x\n",
+ "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n",
+ stream->signal,
stream->sink->edid_caps.display_name,
- stream->signal);
+ stream->sink->edid_caps.manufacturer_id,
+ stream->sink->edid_caps.product_id);
}
}
}
/*
+* dc_stream_get_3dlut_for_stream()
+* Requirements:
+* 1. If the stream already owns an RMCM instance, return it.
+* 2. If it doesn't and we don't need to allocate, return NULL.
+* 3. If there's a free RMCM instance, assign to stream and return it.
+* 4. If no free RMCM instances, return NULL.
+*/
+
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ // see if one is allocated for this stream
+ for (int i = 0; i < num_rmcm; i++) {
+ if (dc->res_pool->rmcm_3dlut[i].isInUse &&
+ dc->res_pool->rmcm_3dlut[i].stream == stream)
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+
+	//case: none found, and we don't need to allocate one
+ if (!allocate_one)
+ return NULL;
+
+ //see if there is an unused 3dlut, allocate
+ for (int i = 0; i < num_rmcm; i++) {
+ if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = true;
+ dc->res_pool->rmcm_3dlut[i].stream = stream;
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+ }
+
+	//no free 3D LUT instance available
+ return NULL;
+}
+
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct dc_rmcm_3dlut *rmcm_3dlut =
+ dc_stream_get_3dlut_for_stream(dc, stream, false);
+
+ if (rmcm_3dlut) {
+ rmcm_3dlut->isInUse = false;
+ rmcm_3dlut->stream = NULL;
+ rmcm_3dlut->protection_bits = 0;
+ }
+}
+
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ for (int i = 0; i < num_rmcm; i++) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = false;
+ dc->res_pool->rmcm_3dlut[i].stream = NULL;
+ dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
+ }
+}
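
The three helpers above implement a simple ownership pool over the RMCM 3D LUT instances: look up the slot a stream already owns, optionally claim a free one, and clear the slot on release/init. A self-contained sketch of the same lookup-then-allocate pattern with generic stand-in types:

#include <stdbool.h>
#include <stddef.h>

#define NUM_SLOTS 4	/* stands in for caps.color.mpc.num_rmcm_3dluts */

struct lut_slot {
	bool in_use;
	const void *owner;	/* stands in for the dc_stream_state pointer */
};

/* Prefer the slot already owned by this stream, otherwise optionally claim a
 * free one, otherwise return NULL. */
static struct lut_slot *get_slot(struct lut_slot *slots, const void *owner,
				 bool allocate_one)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		if (slots[i].in_use && slots[i].owner == owner)
			return &slots[i];

	if (!allocate_one)
		return NULL;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (!slots[i].in_use) {
			slots[i].in_use = true;
			slots[i].owner = owner;
			return &slots[i];
		}
	}

	return NULL;
}
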
+
+/*
* Finds the greatest index in refresh_rate_hz that contains a value <= refresh
*/
static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh)
@@ -1106,3 +1246,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str
return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false);
}
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool is_limit_pending = false;
+
+ if (dc->current_state)
+ is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state);
+
+ return is_limit_pending;
+}
+
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool can_clear_limit = false;
+
+ if (dc->current_state)
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) &&
+ (stream->hw_cursor_req ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes));
+
+ return can_clear_limit;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index f3471d45b312..922f23557f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state)
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- if (pipe_ctx->plane_state)
+ if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.address)
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
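
dc_plane_get_status() now takes an update-flags union so callers opt in to the flip-pending/address poll instead of paying for it on every query. A tiny sketch of that flags idiom; the field layout here (beyond bits.address, which the hunk uses) is a placeholder, not the real union definition:

#include <stdbool.h>
#include <stdint.h>

union plane_status_update_flags_sketch {
	struct {
		uint32_t address : 1;	/* poll flip-pending / surface address */
		uint32_t reserved : 31;	/* placeholder, not the real layout */
	} bits;
	uint32_t raw;
};

static bool should_poll_flip_pending(union plane_status_update_flags_sketch f)
{
	return f.bits.address;
}
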
@@ -270,8 +272,8 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
kref_get(&lut->refcount);
}
-void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
- bool clear_tiling)
+void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
+ bool clear_tiling)
{
struct dc *dc;
int i;
@@ -290,30 +292,21 @@ void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
if (!pipe_ctx)
continue;
- if (dc->ctx->dce_version >= DCE_VERSION_MAX) {
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
- if (!hubp)
- continue;
- /* if framebuffer is tiled, disable tiling */
- if (clear_tiling && hubp->funcs->hubp_clear_tiling)
- hubp->funcs->hubp_clear_tiling(hubp);
-
- /* force page flip to see the new content of the framebuffer */
- hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
- &plane_state->address,
- true);
- } else {
- struct mem_input *mi = pipe_ctx->plane_res.mi;
- if (!mi)
- continue;
- /* if framebuffer is tiled, disable tiling */
- if (clear_tiling && mi->funcs->mem_input_clear_tiling)
- mi->funcs->mem_input_clear_tiling(mi);
-
- /* force page flip to see the new content of the framebuffer */
- mi->funcs->mem_input_program_surface_flip_and_addr(mi,
- &plane_state->address,
- true);
- }
+ if (dc->hwss.clear_surface_dcc_and_tiling)
+ dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
+{
+ struct kref temp_refcount;
+
+ /* backup persistent info */
+ memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
+
+ /* copy all configuration information */
+ memcpy(dst, src, sizeof(struct dc_plane_state));
+
+ /* restore persistent info */
+ memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
+}
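
dc_plane_copy_config() bulk-copies the whole plane state for convenience but saves and restores the embedded kref so the destination keeps its own reference count. A standalone illustration of that save/memcpy/restore idea with a simplified struct:

#include <string.h>

struct refcounted_cfg {
	int refcount;		/* lifetime state that must survive the copy */
	int width, height;	/* configuration payload */
};

static void copy_config(struct refcounted_cfg *dst,
			const struct refcounted_cfg *src)
{
	int saved_refcount = dst->refcount;

	/* copy all configuration information ... */
	memcpy(dst, src, sizeof(*dst));
	/* ... then restore the destination's own lifetime bookkeeping */
	dst->refcount = saved_refcount;
}
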
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8c6347413038..29edfa51ea2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,11 +42,11 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
-#include "spl/dc_spl_types.h"
+#include "sspl/dc_spl_types.h"
struct abm_save_restore;
@@ -54,15 +54,33 @@ struct abm_save_restore;
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
+struct dcn_hubbub_reg_state;
+struct dcn_hubp_reg_state;
+struct dcn_dpp_reg_state;
+struct dcn_mpc_reg_state;
+struct dcn_opp_reg_state;
+struct dcn_dsc_reg_state;
+struct dcn_optc_reg_state;
+struct dcn_dccg_reg_state;
-#define DC_VER "3.2.314"
+#define DC_VER "3.2.359"
-#define MAX_SURFACES 3
+/**
+ * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
+ */
+#define MAX_SURFACES 4
+/**
+ * MAX_PLANES - representative of the upper bound of planes that are supported by the HW
+ */
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_SUPPORTED_FORMATS 7
+
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 3
+#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
/* Display Core Interfaces */
struct dc_versions {
@@ -188,6 +206,34 @@ struct dpp_color_caps {
struct rom_curve_caps ogam_rom_caps;
};
+/* The structure below describes the HW support for 3D LUT memory layout; the
+ * supported range is extended to match what the OS can handle per the roadmap. */
+struct lut3d_caps {
+ uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */
+ struct {
+ uint32_t swizzle_3d_rgb : 1;
+ uint32_t swizzle_3d_bgr : 1;
+ uint32_t linear_1d : 1;
+ } mem_layout_support;
+ struct {
+ uint32_t unorm_12msb : 1;
+ uint32_t unorm_12lsb : 1;
+ uint32_t float_fp1_5_10 : 1;
+ } mem_format_support;
+ struct {
+ uint32_t order_rgba : 1;
+ uint32_t order_bgra : 1;
+ } mem_pixel_order_support;
+ /*< size options are 9, 17, 33, 45, 65 */
+ struct {
+ uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */
+ uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */
+ uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */
+ uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */
+ uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */
+ } lut_dim_caps;
+};
+
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
@@ -196,17 +242,25 @@ struct dpp_color_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTS; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
 * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ * @mcm_3d_lut_caps: HW support cap for MCM LUT memory
+ * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory
+ * @preblend: whether color manager supports preblend with MPC
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
+ uint16_t num_rmcm_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
+ struct lut3d_caps mcm_3d_lut_caps;
+ struct lut3d_caps rmcm_3d_lut_caps;
+ bool preblend;
};
/**
@@ -232,6 +286,15 @@ struct dc_scl_caps {
bool sharpener_support;
};
+struct dc_check_config {
+ /**
+ * max video plane width that can be safely assumed to be always
+ * supported by single DPP pipe.
+ */
+ unsigned int max_optimizable_video_width;
+ bool enable_legacy_fast_update;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -245,12 +308,8 @@ struct dc_caps {
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
+ unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
- /*
- * max video plane width that can be safely assumed to be always
- * supported by single DPP pipe.
- */
- unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
@@ -265,6 +324,7 @@ struct dc_caps {
bool dmcub_support;
bool zstate_support;
bool ips_support;
+ bool ips_v2_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -278,6 +338,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ bool fused_io_supported;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
@@ -299,6 +360,10 @@ struct dc_caps {
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
+ uint8_t num_of_host_routers;
+ uint8_t num_of_dpias_per_host_router;
+ /* limit of the ODM only, could be limited by other factors (like pipe count)*/
+ uint8_t max_odm_combine_factor;
};
struct dc_bug_wa {
@@ -402,6 +467,18 @@ enum surface_update_type {
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
+enum dc_lock_descriptor {
+ LOCK_DESCRIPTOR_NONE = 0x0,
+ LOCK_DESCRIPTOR_STREAM = 0x1,
+ LOCK_DESCRIPTOR_LINK = 0x2,
+ LOCK_DESCRIPTOR_GLOBAL = 0x4,
+};
+
+struct surface_update_descriptor {
+ enum surface_update_type update_type;
+ enum dc_lock_descriptor lock_descriptor;
+};
+
/* Forward declaration*/
struct dc;
struct dc_plane_state;
@@ -443,6 +520,7 @@ struct dc_config {
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
+ bool skip_riommu_prefetch_wa;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -452,6 +530,7 @@ struct dc_config {
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
+ int smart_mux_version;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
@@ -462,6 +541,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_rcg;
unsigned int disable_ips_in_vpb;
bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
@@ -472,6 +552,11 @@ struct dc_config {
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
bool set_pipe_unlock_order;
+ bool enable_dpia_pre_training;
+ bool unify_link_enc_assignment;
+ bool enable_cursor_offload;
+ struct spl_sharpness_range dcn_sharpness_range;
+ struct spl_sharpness_range dcn_override_sharpness_range;
};
enum visual_confirm {
@@ -483,11 +568,15 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
+ VISUAL_CONFIRM_VABC = 21,
+ VISUAL_CONFIRM_DCC = 22,
+ VISUAL_CONFIRM_EXPLICIT = 0x80000000,
};
enum dc_psr_power_opts {
@@ -631,6 +720,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+	/* Stutter efficiency values are technically not clocks, but they are stored
+	 * here so they are passed along in the update_clocks call, similar to num_ways.
+	 * Efficiencies are stored as a percentage (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -758,6 +856,7 @@ enum pg_hw_resources {
PG_DCHVM,
PG_DWB,
PG_HPO,
+ PG_DCOH,
PG_HW_RESOURCES_NUM_ELEMENT
};
@@ -774,7 +873,7 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
+ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
uint32_t reserved:25;
} bits;
uint32_t raw;
@@ -800,6 +899,7 @@ struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
+ struct pipe_topology_history topology_history;
};
struct dc_phy_addr_space_config {
@@ -892,12 +992,18 @@ struct dc_debug_options {
bool voltage_align_fclk;
bool disable_min_fclk;
+ bool hdcp_lc_force_fw_enable;
+ bool hdcp_lc_enable_sw_fallback;
+
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
+ bool disable_io_clk_power_gate;
+ bool disable_mem_power_gate;
+ bool disable_dio_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
@@ -1002,6 +1108,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1038,7 +1145,6 @@ struct dc_debug_options {
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
- bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
@@ -1060,7 +1166,6 @@ struct dc_debug_options {
uint32_t dml21_disable_pstate_method_mask;
union fw_assisted_mclk_switch_version fams_version;
union dmub_fams2_global_feature_config fams2_config;
- bool enable_legacy_clock_update;
unsigned int force_cositing;
unsigned int disable_spl;
unsigned int force_easf;
@@ -1071,10 +1176,18 @@ struct dc_debug_options {
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
- bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
+ uint32_t acpi_transition_bitmasks[MAX_PIPES];
+ bool enable_pg_cntl_debug_logs;
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
+ bool disable_stutter_for_wm_program;
+ bool enable_block_sequence_programming;
};
@@ -1134,7 +1247,7 @@ struct dc_init_data {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
@@ -1235,6 +1348,38 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+7 : 9 * 8 + 7 * 8 = 72 + 56 = 128 % 128 = 0
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: 17 * 8 + 15 * 8 = 136 + 120 = 256 % 128 = 0
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //17+47: 17 * 8 + 47 * 8 = 136 + 376 = 512 % 128 = 0
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+//this structure maps directly to how the lut will read it from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
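
The *_DIM_128_ALIGNED_LEN values pad each LUT row so rows start on 128-byte boundaries: a lut_rgb entry is four uint16_t (8 bytes), so 17 real entries plus 15 pad entries give a 256-byte row, and the 33-point table pads out to 512 bytes. A couple of compile-time checks that make the arithmetic explicit (stand-in struct name, not the patch's):

#include <stdint.h>

struct lut_rgb_entry { uint16_t b, g, r, padding; };	/* 8 bytes */

_Static_assert(sizeof(struct lut_rgb_entry) == 8, "entry must be 8 bytes");
_Static_assert((32 * sizeof(struct lut_rgb_entry)) % 128 == 0, "17-point row pads to a 128B multiple");
_Static_assert((64 * sizeof(struct lut_rgb_entry)) % 128 == 0, "33-point row pads to a 128B multiple");
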
+
+struct dc_rmcm_3dlut {
+ bool isInUse;
+ const struct dc_stream_state *stream;
+ uint8_t protection_bits;
+};
+
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
@@ -1271,7 +1416,6 @@ union surface_update_flags {
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
- uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
@@ -1305,7 +1449,7 @@ struct dc_plane_state {
struct rect clip_rect;
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
@@ -1372,11 +1516,13 @@ struct dc_plane_state {
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
+ struct spl_sharpness_range sharpness_range;
+ enum sharpness_range_source sharpness_source;
};
struct dc_plane_info {
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -1403,15 +1549,184 @@ struct dc_scratch_space {
* store current value in plane states so we can still recover
* a valid current state during dc update.
*/
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state plane_states[MAX_SURFACES];
struct dc_stream_state stream_state;
};
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ enum dc_irq_source irq_source_read_request;/* Read Request */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+	/* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
+
+ bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ * */
+ enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
+ bool skip_phy_ssc_reduction;
+ } wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool is_dds;
+ bool is_display_mux_present;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
+ // BW ALLOCATON USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+ bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
+ struct dc_check_config check_config;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
@@ -1421,6 +1736,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
+ uint8_t lowest_dpia_link_index;
struct link_service *link_srv;
struct dc_state *current_state;
@@ -1444,12 +1760,15 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ /* For eDP to know the switching state of SmartMux */
+ bool is_switch_in_progress_orig;
+ bool is_switch_in_progress_dest;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -1475,13 +1794,14 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ struct dc_link temp_link;
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
struct dml2_configuration_options dml2_options;
- struct dml2_configuration_options dml2_tmp;
+ struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1534,6 +1854,29 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ struct dcn_hubbub_reg_state *hubbub_reg_state;
+ struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
+ struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
+ struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
+ struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
+ struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
+ struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
+ struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
+};
+
+struct power_features {
+ bool ips;
+ bool rcg;
+ bool replay;
+ bool dds;
+ bool sprs;
+ bool psr;
+ bool fams;
+ bool mpo;
+ bool uclk_p_state;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1552,8 +1895,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -1580,25 +1921,19 @@ bool dc_validate_boot_timing(const struct dc *dc,
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
-void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
-
enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
-/*
- * fast_validate: we return after determining if we can support the new state,
- * but before we populate the programming info
- */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
@@ -1643,167 +1978,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
-
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool skip_stream_reenable;
- bool is_internal_display;
- /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
-
- /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
- * for every link training. This is incompatible with DP LL compliance automation,
- * which expects the same link settings to be used every retry on a link loss.
- * This flag is used to skip the fallback when link loss occurs during automation.
- */
- bool skip_fallback_on_link_loss;
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
- enum engine_id dpia_preferred_eng_id;
-
- bool test_pattern_enabled;
- /* Pending/Current test pattern are only used to perform and track
- * FIXED_VS retimer test pattern/lane adjustment override state.
- * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
- * to perform specific lane adjust overrides before setting certain
- * PHY test patterns. In cases when lane adjust and set test pattern
- * calls are not performed atomically (i.e. performing link training),
- * pending_test_pattern will be invalid or contain a non-PHY test pattern
- * and current_test_pattern will contain required context for any future
- * set pattern/set lane adjust to transition between override state(s).
- * */
- enum dp_test_pattern current_test_pattern;
- enum dp_test_pattern pending_test_pattern;
-
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- enum dp_panel_mode panel_mode;
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- struct hdcp_caps hdcp_caps;
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
- struct replay_settings replay_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- bool blank_stream_on_ocs_change;
- bool read_dpcd204h_on_irq_hpd;
- } wa_flags;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
- bool skip_implict_edp_power_control;
- enum backlight_control_type backlight_control_type;
-};
-
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
@@ -1945,6 +2119,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+struct ddc_service *
+dc_get_oem_i2c_device(struct dc *dc);
+
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address
@@ -2025,6 +2202,24 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
+struct dp_audio_bandwidth_params {
+ const struct dc_crtc_timing *crtc_timing;
+ enum dp_link_encoding link_encoding;
+ uint32_t channel_count;
+ uint32_t sample_rate_hz;
+};
+
+/* The function calculates the minimum size of hblank (in bytes) needed to
+ * support the specified channel count and sample rate combination, given the
+ * link encoding and timing to be used. This calculation is not supported
+ * for 8b/10b SST.
+ *
+ * return - min hblank size in bytes, 0 if 8b/10b SST.
+ */
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -2322,19 +2517,6 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
/*
- * Handle function for when the status of the Request above is complete.
- * We will find out the result of allocating on CM and update structs.
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
- uint8_t bw, uint8_t result);
-
-/*
* Handle the USB4 BW Allocation related functionality here:
* Plug => Try to allocate max bw from timing parameters supported by the sink
* Unplug => de-allocate bw
@@ -2342,23 +2524,23 @@ void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
- * return: allocated bw else return 0
*/
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
- * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
- * available BW for each host router
- *
- * @dc: pointer to dc struct
- * @stream: pointer to all possible streams
- * @count: number of valid DPIA streams
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * return: dc_status
*/
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
- const unsigned int count);
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
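
dc_link_validate_dp_tunneling_bandwidth() replaces the per-stream dc_link_dp_dpia_validate(): per its comment it derives the bandwidth each stream timing needs and aggregates it per DP tunneling link, failing validation when a host router's budget is exceeded. A simplified, self-contained sketch of that aggregation (the helper names and numbers here are illustrative only):

#include <stdbool.h>

#define MAX_ROUTERS 3	/* mirrors MAX_HOST_ROUTERS_NUM in this patch */

struct stream_req {
	int host_router;	/* which host router the tunneled link hangs off */
	int required_kbps;	/* derived from the stream timing in real code */
};

static bool validate_tunneling_bw(const struct stream_req *reqs, int count,
				  const int available_kbps[MAX_ROUTERS])
{
	int total[MAX_ROUTERS] = { 0 };
	int i;

	/* aggregate the demand per host router */
	for (i = 0; i < count; i++)
		total[reqs[i].host_router] += reqs[i].required_kbps;

	/* reject the state if any router is oversubscribed */
	for (i = 0; i < MAX_ROUTERS; i++)
		if (total[i] > available_kbps[i])
			return false;

	return true;
}
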
+
+/*
+ * Get if ALPM is supported by the link
+ */
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
/* Sink Interfaces - A sink corresponds to a display output device */
@@ -2384,6 +2566,13 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_hblank_expansion_caps {
+ // 'true' if these are virtual DPCD's HBlank expansion caps (immediately upstream of sink in MST topology),
+ // 'false' if they are sink's HBlank expansion caps
+ bool is_virtual_dpcd_hblank_expansion;
+ struct hblank_expansion_dpcd_caps dpcd_caps;
+};
+
struct dc_sink_fec_caps {
bool is_rx_fec_supported;
bool is_topology_fec_supported;
@@ -2410,6 +2599,7 @@ struct dc_sink {
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
+ struct dc_sink_hblank_expansion_caps hblank_expansion_caps;
bool is_vsc_sdp_colorimetry_supported;
@@ -2529,6 +2719,13 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/*
+ * smart power OLED Interfaces
+ */
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline);
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL);
+
/* Get dc link index from dpia port index */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index);
@@ -2560,13 +2757,527 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
+void dc_log_preos_dmcub_info(const struct dc *dc);
+
/* DSC Interfaces */
#include "dc_dsc.h"
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color);
+
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
+bool dc_is_cursor_limit_pending(struct dc *dc);
+bool dc_can_clear_cursor_limit(const struct dc *dc);
+
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW states when underflow happens,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data);
+
+/**
+ * Software state variables used to program register fields across the display pipeline
+ */
+struct dc_register_software_state {
+ /* HUBP register programming variables for each pipe */
+ struct {
+ bool valid_plane_state;
+ bool valid_stream;
+ bool min_dc_gfx_version9;
+ uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */
+ uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */
+ uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */
+ uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */
+ uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */
+ uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */
+ uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */
+ uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */
+ uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */
+ uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */
+ uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */
+ uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */
+ uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */
+ uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */
+ uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */
+
+ /* Additional DCC configuration */
+ uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */
+ uint32_t surface_dcc_ind_128b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */
+
+ /* Surface pitch configuration */
+ uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */
+ uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */
+ uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */
+ uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */
+
+ /* Surface addresses */
+ uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */
+ uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */
+ uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */
+ uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */
+
+ /* TMZ configuration */
+ uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */
+ uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */
+
+ /* Tiling configuration */
+ uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */
+ uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */
+ uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */
+ uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */
+ uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */
+ uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */
+ uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */
+
+ /* DML Request Size Configuration - Luma */
+ uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */
+ uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */
+ uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */
+ uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */
+ uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */
+ uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */
+
+ /* DML Request Size Configuration - Chroma */
+ uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */
+ uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */
+ uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */
+ uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */
+ uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */
+ uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */
+
+ /* DML Expansion Modes */
+ uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */
+ uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */
+ uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */
+ uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */
+
+ /* DML DLG parameters - nominal */
+ uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */
+ uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */
+ uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */
+ uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */
+
+ /* DML prefetch settings */
+ uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */
+ uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */
+ uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */
+
+ /* TTU parameters */
+ uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */
+ uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */
+ uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */
+ uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */
+ } hubp[MAX_PIPES];
+
+ /* HUBBUB register programming variables */
+ struct {
+ /* Individual DET buffer control per pipe - software state that programs DET registers */
+ uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */
+ uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */
+ uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */
+ uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */
+
+ /* Compression buffer control - software state that programs COMPBUF registers */
+ uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */
+ uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */
+ uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */
+ } hubbub;
+
+	/* DPP register programming variables for each pipe (simplified to the fields available in software state) */
+ struct {
+ uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from dppclk_enable */
+
+ /* Recout (Rectangle of Interest) configuration */
+ uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */
+ uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */
+ uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */
+ uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */
+
+ /* MPC (Multiple Pipe/Plane Combiner) size configuration */
+ uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */
+ uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */
+
+ /* DSCL mode configuration */
+ uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */
+
+ /* Scaler ratios (simplified to integer parts) */
+ uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */
+ uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */
+
+ /* Basic scaler taps */
+ uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */
+ uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */
+ } dpp[MAX_PIPES];
+
+ /* DCCG register programming variables */
+ struct {
+ /* Core Display Clock Control */
+ uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */
+ uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */
+
+		/* DPP Clock Control - 5 fields per pipe */
+ uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */
+ uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */
+ uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */
+ uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */
+ uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */
+
+ /* DSC Clock Control - 4 fields per DSC resource */
+ uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */
+ uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */
+ uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */
+ uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */
+
+ /* Pixel Clock Control - per pipe */
+ uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */
+ uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */
+ uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */
+ uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */
+ uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */
+ uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from dccg31_otg_add_pixel() */
+ uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */
+
+ /* DTBCLK DTO Control - 4 DTOs */
+ uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */
+ uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */
+ uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */
+
+ /* DP Stream Clock Control - 4 pipes */
+ uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */
+ uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */
+ uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */
+ uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */
+
+ /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */
+ uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */
+ uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */
+ uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */
+
+ /* SYMCLK32 SE Control - 4 instances */
+ uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */
+ uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */
+ uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */
+
+ /* SYMCLK32 LE Control - 2 instances */
+ uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */
+ uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */
+ uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */
+
+ /* DPIA Clock Control */
+ uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */
+ uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */
+ uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */
+ uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */
+ uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */
+ uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */
+
+ /* Clock Gating Control */
+ uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */
+ uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+ uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+
+ /* VSync Control */
+ uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */
+ uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */
+ uint32_t vsync_otg_latch_value[6]; /* DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */
+
+ /* Time Base Control */
+ uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */
+ uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */
+ } dccg;
+
+ /* DSC essential configuration for underflow analysis */
+ struct {
+ /* DSC active state - critical for bandwidth analysis */
+ uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */
+
+ /* DSC configuration affecting bandwidth and timing */
+ uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */
+ uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */
+ uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */
+
+ /* OPP integration - affects pipeline flow */
+ uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */
+ uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */
+ } dsc[MAX_PIPES];
+
+ /* MPC register programming variables */
+ struct {
+ /* MPCC blending tree and mode control */
+ uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */
+ uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */
+ uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */
+ uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */
+ uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */
+ uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */
+ uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */
+ uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */
+
+ /* MPCC blending tree connections */
+ uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */
+ uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */
+
+ /* MPCC output gamma control */
+ uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */
+ uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */
+ uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */
+
+ /* MPCC pipe assignment and status */
+ uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */
+ uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */
+ uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */
+
+ /* MPC output processing */
+ uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */
+ uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */
+ } mpc;
+
+ /* OPP register programming variables for each pipe */
+ struct {
+		/* Display Pattern Generator (DPG) Control - enable bit captured from the DPG_CONTROL register */
+ uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */
+
+ /* Format Control (FMT) - 18 fields from FMT_CONTROL register */
+ uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */
+ uint32_t fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */
+ uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */
+ uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */
+ uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */
+ uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */
+ uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */
+ uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */
+ uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */
+ uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */
+ uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */
+ uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */
+ uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */
+ uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */
+ uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */
+ uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */
+ uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */
+ uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */
+
+ /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */
+ uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */
+
+ /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */
+ uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */
+ uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */
+ uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */
+
+ /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */
+ uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */
+ uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */
+ uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */
+ uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */
+ uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */
+ uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */
+
+ /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */
+ uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */
+ uint32_t dscrm_dsc_opp_pipe_source; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */
+ uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */
+ } opp[MAX_PIPES];
+
+ /* OPTC register programming variables for each pipe */
+ struct {
+ uint32_t otg_master_inst;
+
+ /* OTG_CONTROL register - 5 fields for OTG control */
+ uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */
+ uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */
+ uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */
+ uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */
+ uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */
+
+ /* OTG Horizontal Timing - 7 fields */
+ uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */
+ uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */
+ uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */
+ uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */
+ uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */
+ uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */
+ uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */
+ uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */
+ uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */
+ uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */
+ uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */
+ uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */
+ uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */
+
+		/* OTG DRR (Dynamic Refresh Rate) Control - 9 fields */
+ uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */
+ uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */
+ uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */
+ uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */
+ uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */
+ uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */
+ uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */
+ uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */
+ uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */
+
+ /* OPTC Data Source and ODM - 6 fields */
+ uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */
+ uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */
+ uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */
+ uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */
+ uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */
+ uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */
+
+		/* OPTC Data Format and DSC - 5 fields */
+ uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */
+ uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */
+ uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */
+ uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */
+ uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */
+
+		/* OPTC Clock and Underflow Control - 5 fields */
+ uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */
+ uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */
+ uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */
+ uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */
+ uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */
+
+		/* OTG Stereo, 3D and Interlace Control - 7 fields */
+ uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */
+ uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */
+ uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */
+ uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */
+ uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */
+ uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */
+ uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */
+
+ /* OTG GSL (Global Sync Lock) Control - 5 fields */
+ uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */
+ uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */
+ uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */
+ uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */
+ uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */
+
+ /* OTG DRR Advanced Control - 4 fields */
+ uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */
+ uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */
+ uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */
+ uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */
+
+ /* OTG DSC Position Control - 2 fields */
+ uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */
+ uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */
+
+ /* OTG Double Buffer Control - 2 fields */
+ uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */
+ uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */
+
+ /* OTG Vertical Interrupts - 6 fields */
+ uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */
+ uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */
+ uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */
+ uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */
+
+ /* OTG Global Sync Parameters - 6 fields */
+ uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */
+ uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */
+ uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */
+ uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */
+ uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */
+ uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */
+
+		/* OTG Manual Trigger Control - 12 fields */
+ uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */
+ uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */
+ uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */
+ uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */
+ uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */
+ uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */
+ uint32_t otg_triga_delay; /* OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */
+ uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */
+ uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */
+ uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */
+ uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */
+ uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */
+
+ /* OTG Static Screen and Update Control - 6 fields */
+ uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */
+ uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */
+ uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */
+ uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */
+ uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */
+ uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */
+
+ /* VTG Control - 3 fields */
+ uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */
+ uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */
+ uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */
+
+		/* OTG Status (Read-Only) - 11 fields */
+ uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */
+ uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */
+ uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */
+ uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */
+ uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */
+ uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */
+ uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */
+ uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */
+ uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */
+ uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */
+ uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */
+ } optc[MAX_PIPES];
+
+ /* Metadata */
+ uint32_t active_pipe_count;
+ uint32_t active_stream_count;
+ bool state_valid;
+};
+
+/**
+ * dc_capture_register_software_state() - Capture software state for register programming
+ * @dc: DC context containing current display configuration
+ * @state: Pointer to dc_register_software_state structure to populate
+ *
+ * Extracts all software state variables that are used to program hardware register
+ * fields across the display driver pipeline. This provides a complete snapshot
+ * of the software configuration that drives hardware register programming.
+ *
+ * The function traverses the DC context and extracts values from:
+ * - Stream configurations (timing, format, DSC settings)
+ * - Plane states (surface format, rotation, scaling, cursor)
+ * - Pipe contexts (resource allocation, blending, viewport)
+ * - Clock manager (display clocks, DPP clocks, pixel clocks)
+ * - Resource context (DET buffer allocation, ODM configuration)
+ *
+ * This is essential for underflow debugging as it captures the exact software
+ * state that determines how registers are programmed, allowing analysis of
+ * whether underflow is caused by incorrect register programming or timing issues.
+ *
+ * Return: true if state was successfully captured, false on error
+ */
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state);
+
#endif /* DC_INTERFACE_H_ */
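A minimal sketch of how a debug path could consume the capture API above, assuming it runs in a context where kzalloc()/kfree() and pr_debug() are available; the helper name and the choice to heap-allocate the (very large) state struct are illustrative, not part of this interface:

static void dump_otg_state_for_underflow(struct dc *dc)
{
	struct dc_register_software_state *state;
	uint32_t i;

	/* The capture struct covers every pipe, so keep it off the stack. */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	if (dc_capture_register_software_state(dc, state) && state->state_valid) {
		/* Assumes captured pipes are packed at the start of the arrays. */
		for (i = 0; i < state->active_pipe_count; i++)
			pr_debug("pipe %u: h_total=%u v_total=%u underflow=%u\n",
				 i,
				 state->optc[i].otg_h_total,
				 state->optc[i].otg_v_total,
				 state->optc[i].optc_underflow_occurred_status);
	}

	kfree(state);
}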
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 5fa5e2b63fb7..40d7a7d83c40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -91,9 +91,17 @@ struct dc_vbios_funcs {
struct device_id id);
/* COMMANDS */
+ enum bp_result (*select_crtc_source)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*encoder_control)(
struct dc_bios *bios,
struct bp_encoder_control *cntl);
+ enum bp_result (*dac_load_detection)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
@@ -165,6 +173,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 44ff9abe2880..7b09af1cb306 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
+#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+
+ if (res)
+ res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
+ }
+
+ return res;
+}
+
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -200,18 +276,20 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- if (!dmub->debug.timeout_occured) {
- dmub->debug.timeout_occured = true;
- dmub->debug.timeout_cmd = *cmd_list;
- dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ if (!dmub->debug.timeout_info.timeout_occured) {
+ dmub->debug.timeout_info.timeout_occured = true;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
-
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
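As a usage-level illustration of the refactor above, a hedged sketch of a caller that builds one command and runs it synchronously; the command type used here (DMUB_CMD__IDLE_OPT with an empty payload) is a placeholder chosen purely for the example:

static void example_send_one_dmub_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_common.header.type = DMUB_CMD__IDLE_OPT; /* placeholder type */
	cmd.cmd_common.header.payload_bytes = 0;

	/*
	 * dc_dmub_srv_cmd_run_list() now queues on whichever inbox is active
	 * (register-based or frame-buffer ring), kicks off execution, and
	 * waits for the firmware to go idle before returning.
	 */
	dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}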
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -428,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
- int pipe_idx = 0;
struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
@@ -443,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
if (should_manage_pstate) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -458,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
- pipe_idx++;
}
}
@@ -858,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
bool enable)
{
uint8_t cmd_pipe_index = 0;
- uint32_t i, pipe_idx;
+ uint32_t i;
uint8_t subvp_count = 0;
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
@@ -885,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (enable) {
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
@@ -908,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
}
- pipe_idx++;
}
if (subvp_count == 2) {
update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
@@ -927,16 +938,15 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
- return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
+ return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_diagnostic_data diag_data = {0};
uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
@@ -946,102 +956,56 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
- if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
+ if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
}
DC_LOG_DEBUG("DMCUB STATE:");
- DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version);
- DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]);
- DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]);
- DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]);
- DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]);
- DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]);
- DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]);
- DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]);
- DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]);
- DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]);
- DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]);
- DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]);
- DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]);
- DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]);
- DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
- DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
- DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
+ DC_LOG_DEBUG(" dmcub_version : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
+ DC_LOG_DEBUG(" scratch [0] : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
+ DC_LOG_DEBUG(" scratch [1] : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
+ DC_LOG_DEBUG(" scratch [2] : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
+ DC_LOG_DEBUG(" scratch [3] : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
+ DC_LOG_DEBUG(" scratch [4] : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
+ DC_LOG_DEBUG(" scratch [5] : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
+ DC_LOG_DEBUG(" scratch [6] : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
+ DC_LOG_DEBUG(" scratch [7] : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
+ DC_LOG_DEBUG(" scratch [8] : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
+ DC_LOG_DEBUG(" scratch [9] : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
+ DC_LOG_DEBUG(" scratch [10] : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
+ DC_LOG_DEBUG(" scratch [11] : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
+ DC_LOG_DEBUG(" scratch [12] : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
+ DC_LOG_DEBUG(" scratch [13] : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
+ DC_LOG_DEBUG(" scratch [14] : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
+ DC_LOG_DEBUG(" scratch [15] : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
- DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
- DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
- DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
- DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
- DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr);
- DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr);
- DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size);
- DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
- DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
- DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
- DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
- DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
- DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
- DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
- DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
- DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
- DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en);
- DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled);
- DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled);
-}
-
-static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
+ DC_LOG_DEBUG(" unk_fault_addr : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
+ DC_LOG_DEBUG(" inst_fault_addr : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
+ DC_LOG_DEBUG(" data_fault_addr : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
+ DC_LOG_DEBUG(" inbox1_rptr : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
+ DC_LOG_DEBUG(" inbox1_wptr : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
+ DC_LOG_DEBUG(" inbox1_size : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
+ DC_LOG_DEBUG(" inbox0_rptr : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
+ DC_LOG_DEBUG(" inbox0_wptr : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
+ DC_LOG_DEBUG(" inbox0_size : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
+ DC_LOG_DEBUG(" is_enabled : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
+ DC_LOG_DEBUG(" is_soft_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
+ DC_LOG_DEBUG(" is_secure_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
+ DC_LOG_DEBUG(" is_traceport_en : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
+ DC_LOG_DEBUG(" is_cw0_en : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
+ DC_LOG_DEBUG(" is_cw6_en : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dc_can_pipe_disable_cursor(pipe_ctx))
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ resource_can_pipe_disable_cursor(pipe_ctx))
return false;
}
@@ -1207,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
+void dc_dmub_srv_cursor_offload_init(struct dc *dc)
+{
+ struct dmub_rb_cmd_cursor_offload_init *init;
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc->config.enable_cursor_offload)
+ return;
+
+ if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_v1)
+ return;
+
+ if (!dc_dmub_srv->dmub->shared_state)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ init = &cmd.cursor_offload_init;
+ init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
+ init->header.payload_bytes = sizeof(init->init_data);
+ init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
+ init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ dc_dmub_srv->cursor_offload_enabled = true;
+}
+
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable)
+{
+ struct pipe_ctx const *pipe_ctx;
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!stream)
+ return;
+
+ pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type =
+ enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+
+ cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
+ cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
+ stream->timing.pix_clk_100hz / 10));
+
+ cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
+ stream->adjust.v_total_max :
+ stream->timing.v_total;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
+ enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+}
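For a sense of scale, line_time_in_ns above is h_total * 10^6 divided by the pixel clock in kHz (pix_clk_100hz / 10), plus one to round up: for example, a 3840x2160@60 timing with h_total = 4400 and a 594 MHz pixel clock gives 4400 * 1000000 / 594000 = 7407 ns, reported as 7408 ns.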
+
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!pipe || !pipe->stream || !pipe->stream_res.tg)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+ cntl->data.otg_inst = pipe->stream_res.tg->inst;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
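Taken together, the three helpers above suggest the lifecycle sketched below; where each call is actually hooked (DMUB init, stream enable/disable, the cursor update path) is an assumption made for illustration rather than something this hunk establishes:

static void example_cursor_offload_lifecycle(struct dc *dc, struct dc_state *context,
					     struct dc_stream_state *stream,
					     struct pipe_ctx *otg_master)
{
	/* Once after DMUB is up: hand the firmware its shared cursor state buffer. */
	dc_dmub_srv_cursor_offload_init(dc);

	/* When the stream becomes active: let DMUB track cursor updates for its OTG. */
	dc_dmub_srv_control_cursor_offload(dc, context, stream, true);

	/* On an urgent cursor change: ask the firmware to program it immediately. */
	dc_dmub_srv_program_cursor_now(dc, otg_master);

	/* Before the stream is torn down: stop offloading for this stream. */
	dc_dmub_srv_control_cursor_offload(dc, context, stream, false);
}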
+
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx;
@@ -1290,7 +1348,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1302,12 +1360,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
@@ -1319,6 +1381,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else {
/* RCG only */
new_signals.bits.allow_pg = 0;
@@ -1326,8 +1390,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips2 = 0;
new_signals.bits.allow_z10 = 0;
}
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ }
+ // Setting RCG allow bits (IPSv2.0)
+ if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ }
+ // IPS dynamic allow bits (IPSv2 change, vpb use case)
+ if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ new_signals.bits.allow_dynamic_ips1_z8 = 1;
}
-
ips_driver->signals = new_signals;
dc_dmub_srv->driver_signals = ips_driver->signals;
}
@@ -1351,7 +1435,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
- uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1371,45 +1455,49 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
rcg_exit_count = ips_fw->rcg_exit_count;
ips1_exit_count = ips_fw->ips1_exit_count;
ips2_exit_count = ips_fw->ips2_exit_count;
+ ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
ips_driver->signals.all = 0;
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit,
ips_fw->rcg_entry_count,
ips_fw->ips1_entry_count,
- ips_fw->ips2_entry_count);
+ ips_fw->ips2_entry_count,
+ ips_fw->ips1_z8ret_entry_count);
/* Note: register access has technically not resumed for DCN here, but we
* need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
- if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
- ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- if (ips_fw->signals.bits.ips2_commit) {
- DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
- ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- // Tell PMFW to exit low power state
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
@@ -1447,35 +1535,38 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
- ips2_exit_count);
+ ips2_exit_count,
+ ips1z8_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
@@ -1688,7 +1779,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
return result;
}
-void dc_dmub_srv_fams2_update_config(struct dc *dc,
+static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
struct dc_state *context,
bool enable)
{
@@ -1701,7 +1792,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1720,11 +1812,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1751,6 +1845,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
+ union dmub_rb_cmd cmd;
+ uint32_t i;
+
+ memset(config, 0, sizeof(*config));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
+
+ cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
+ cmd.ib_fams2_config.ib_data.size = sizeof(*config);
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+ /* copy static feature configuration overrides */
+ config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+ config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+
+ /* send global configuration parameters */
+ memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ /* copy stream static base state */
+ memcpy(&config->stream_v1[i].base,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(config->stream_v1[i].base));
+
+ /* copy stream static sub-state */
+ memcpy(&config->stream_v1[i].sub_state,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
+ sizeof(config->stream_v1[i].sub_state));
+ }
+ }
+
+ config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ config->global.features.bits.enable = enable;
+
+ dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ if (dc->debug.fams_version.major == 2)
+ dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
+ if (dc->debug.fams_version.major == 3)
+ dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
+}
+
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
uint32_t tg_inst,
uint32_t vtotal_min,
@@ -1770,7 +1921,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1806,7 +1958,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
@@ -1874,80 +2027,344 @@ void dc_dmub_srv_fams2_passthrough_flip(
}
}
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
+ cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
+ cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
+
+ // only panel_inst=0 is supported at the moment
+ cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
+ cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t bytes = sizeof(struct dmub_ips_residency_info);
+
+ dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
+ cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
+ cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
+
+ cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.ips_query_residency_info.info_data.size = bytes;
+ cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
+ cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
+ cmd.ips_query_residency_info.header.ret_status == 0)
+ return false;
+
+ // copy the result to the output since ret_status != 0 means the command returned data
+ memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
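A minimal caller-side sketch for the two IPS residency helpers above, assuming a valid dc_context and panel instance 0; the workload placeholder, the report_residency() consumer and the ips_mode value are illustrative and not part of this patch:

    struct dmub_ips_residency_info info = {0};

    dc_dmub_srv_ips_residency_cntl(ctx, 0, true);    /* start measurement */
    /* ... run the workload of interest ... */
    dc_dmub_srv_ips_residency_cntl(ctx, 0, false);   /* stop measurement */

    /* results come back through the DMUB scratch framebuffer */
    if (dc_dmub_srv_ips_query_residency_info(ctx, 0, &info, ips_mode))
        report_residency(&info);                     /* hypothetical consumer */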
+
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
bool result;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu)
return false;
- result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
- start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
+ lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Init failed in DMUB");
return result;
}
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+bool dmub_lsdma_send_linear_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count
+)
{
- uint32_t i;
- enum dmub_gpint_command command_code;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects the byte count minus 1
+ lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Copy failed in DMUB");
+
+ return result;
+}
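Note that callers pass the plain byte count; the helper stores count - 1 because that is what the LSDMA packet expects. A hedged usage sketch (the GPU addresses are placeholders):

    /* copy 4 KiB; the helper converts 4096 into the 4095 the controller wants */
    dmub_lsdma_send_linear_copy_command(dc->ctx->dmub_srv, src_gpu_addr, dst_gpu_addr, 4096);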
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_x = params.src_x;
+ lsdma_data->u.tiled_copy_data.src_y = params.src_y;
+ lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
+ lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
+ lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.rect_x = params.rect_x;
+ lsdma_data->u.tiled_copy_data.rect_y = params.rect_y;
+ lsdma_data->u.tiled_copy_data.dcc = params.dcc;
+ lsdma_data->u.tiled_copy_data.tmz = params.tmz;
+ lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
+ lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
+ lsdma_data->u.tiled_copy_data.data_format = params.data_format;
+ lsdma_data->u.tiled_copy_data.max_com = params.max_com;
+ lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
+ lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
+ lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.data = data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Constfill failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.reg_write_data.reg_addr = reg_addr;
+ lsdma_data->u.reg_write_data.reg_data = reg_data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Poll Reg failed in DMUB");
+
+ return result;
+}
+
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
+{
+ return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
+}
+
+void dc_dmub_srv_release_hw(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd = {0};
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- switch (output->ips_mode) {
- case DMUB_IPS_MODE_IPS1_MAX:
- command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS2:
- command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_RCG:
- command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_ONO2_ON:
- command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
- break;
- default:
- command_code = DMUB_GPINT__INVALID_COMMAND;
- break;
- }
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
+ cmd.idle_opt_notify_idle.header.payload_bytes =
+ sizeof(cmd.idle_opt_notify_idle) -
+ sizeof(cmd.idle_opt_notify_idle.header);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
- if (command_code == DMUB_GPINT__INVALID_COMMAND)
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- // send gpint commands and wait for ack
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode),
- &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->residency_percent = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
- (uint16_t)(output->ips_mode),
- &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->entry_counter = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[1] = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[1] = 0;
-
- // NUM_IPS_HISTOGRAM_BUCKETS = 16
- for (i = 0; i < 16; i++)
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->histogram[i] = 0;
+ dmub = dc_dmub_srv->dmub;
+
+ if (dmub_srv_get_preos_info(dmub)) {
+ DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
+ DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
+ DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
+ DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
+ DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
+ DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
+ DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
+ DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 10b48198b7a6..72e0a41f39f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,9 +56,10 @@ struct dc_dmub_srv {
union dmub_shared_state_ips_driver_signals driver_signals;
bool idle_allowed;
bool needs_idle_wake;
+ bool cursor_offload_enabled;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
@@ -94,7 +95,7 @@ void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
@@ -210,6 +211,94 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_surface_update *srf_updates,
int surface_count);
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dmub_lsdma_send_linear_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count);
+
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable);
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data);
+
+struct lsdma_send_tiled_to_tiled_copy_command_params {
+ uint64_t src_addr;
+ uint64_t dst_addr;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t dst_width : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_height : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t data_format : 6;
+ uint32_t swizzle_mode : 5;
+ uint32_t element_size : 3;
+ uint32_t dcc : 1;
+ uint32_t tmz : 1;
+ uint32_t read_compress : 2;
+ uint32_t write_compress : 2;
+ uint32_t max_com : 2;
+ uint32_t max_uncom : 1;
+ uint32_t padding : 9;
+};
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params);
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data);
+
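A sketch of populating the packed parameter struct above for a small sub-window copy; all values are illustrative, the element_size encoding is assumed, and lower_32_bits()/upper_32_bits() are the usual kernel helpers:

    struct lsdma_linear_sub_window_copy_params p = {
        .src_lo = lower_32_bits(src_addr), .src_hi = upper_32_bits(src_addr),
        .dst_lo = lower_32_bits(dst_addr), .dst_hi = upper_32_bits(dst_addr),
        .src_x = 0,   .src_y = 0,
        .dst_x = 16,  .dst_y = 16,
        .rect_x = 64, .rect_y = 64,               /* 64x64 pixel region */
        .src_pitch = 1920, .dst_pitch = 1920,
        .src_slice_pitch = 1920 * 1080, .dst_slice_pitch = 1920 * 1080,
        .element_size = 2,                        /* assumed encoding for 4 bytes per element */
    };

    dmub_lsdma_send_linear_sub_window_copy_command(dc->ctx->dmub_srv, p);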
/**
* struct ips_residency_info - struct containing info from dmub_ips_residency_stats
*
@@ -223,7 +312,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
* @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
*/
struct ips_residency_info {
- enum dmub_ips_mode ips_mode;
+ enum ips_residency_mode ips_mode;
unsigned int residency_percent;
unsigned int entry_counter;
unsigned int total_active_time_us[2];
@@ -231,21 +320,58 @@ struct ips_residency_info {
unsigned int histogram[16];
};
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement);
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
+ struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode);
+
/**
- * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
+ * dc_dmub_srv_cursor_offload_init() - Initializes cursor offload state in DMUB firmware.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @start_measurement: Describes whether to start or stop measurement
+ * @dc: pointer to DC object
+ */
+void dc_dmub_srv_cursor_offload_init(struct dc *dc);
+
+/**
+ * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream.
+ *
+ * @dc: pointer to DC object
+ * @context: the DC context to reference for pipe allocations
+ * @stream: the stream to control
+ * @enable: true to enable cursor offload, false to disable
+ */
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable);
+
+/**
+ * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe.
+ *
+ * @dc: pointer to DC object
+ * @pipe: top-most pipe for a stream.
+ */
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe);
+
+/**
+ * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload has been enabled.
+ *
+ * @dc: pointer to DC object
+ *
+ * Return: true if cursor offload is enabled, false otherwise
+ */
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
+
+/**
+ * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
- * Return: true if GPINT was sent successfully, false otherwise
+ * @dc: pointer to DC object
*/
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+void dc_dmub_srv_release_hw(const struct dc *dc);
/**
- * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @output: Output struct to copy the the residency info to
+ * @dc_dmub_srv: pointer to the DC DMUB service
*/
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 8dd6eb044829..79e1696def63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -159,6 +159,16 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
+struct dc_tunnel_settings {
+ bool should_enable_dp_tunneling;
+ bool should_use_dp_bw_allocation;
+ uint8_t cm_id;
+ uint8_t group_id;
+ uint32_t bw_granularity;
+ uint32_t estimated_bw;
+ uint32_t allocated_bw;
+};
+
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -300,6 +310,19 @@ union lane_align_status_updated {
uint8_t raw;
};
+union link_service_irq_vector_esi0 {
+ struct {
+ uint8_t DP_LINK_RX_CAP_CHANGED:1;
+ uint8_t DP_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_STREAM_STATUS_CHANGED:1;
+ uint8_t DP_LINK_HDMI_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_CONNECTED_OFF_ENTRY_REQUESTED:1;
+ uint8_t DP_LINK_TUNNELING_IRQ:1;
+ uint8_t reserved:2;
+ } bits;
+ uint8_t raw;
+};
+
union lane_adjust {
struct {
uint8_t VOLTAGE_SWING_LANE:2;
@@ -410,14 +433,6 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
-union hdmi_sink_encoded_link_bw_support {
- struct {
- uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
- uint8_t RESERVED:5;
- } bits;
- uint8_t raw;
-};
-
union hdmi_encoded_link_bw {
struct {
uint8_t FRL_MODE:1; // Bit 0
@@ -427,7 +442,28 @@ union hdmi_encoded_link_bw {
uint8_t BW_32Gbps:1;
uint8_t BW_40Gbps:1;
uint8_t BW_48Gbps:1;
- uint8_t RESERVED:1; // Bit 7
+ uint8_t FRL_LINK_TRAINING_FINISHED:1; // Bit 7
+ } bits;
+ uint8_t raw;
+};
+
+union hdmi_tx_link_status {
+ struct {
+ uint8_t HDMI_TX_LINK_ACTIVE_STATUS:1;
+ uint8_t HDMI_TX_READY_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union autonomous_mode_and_frl_link_status {
+ struct {
+ uint8_t FRL_LT_IN_PROGRESS_STATUS:1;
+ uint8_t FRL_LT_LINK_CONFIG_IN_PROGRESS:3;
+ uint8_t RESERVED:1;
+ uint8_t FALLBACK_POLICY:1;
+ uint8_t FALLBACK_POLICY_VALID:1;
+ uint8_t REGULATED_AUTONOMOUS_MODE_SUPPORTED:1;
} bits;
uint8_t raw;
};
@@ -470,8 +506,10 @@ union sink_status {
uint8_t raw;
};
-/*6-byte structure corresponding to 6 registers (200h-205h)
-read during handling of HPD-IRQ*/
+/* 7-byte structure corresponding to 6 registers (200h-205h)
+ * and LINK_SERVICE_IRQ_ESI0 (2005h) for tunneling IRQ
+ * read during handling of HPD-IRQ
+ */
union hpd_irq_data {
struct {
union sink_count sink_cnt;/* 200h */
@@ -479,9 +517,10 @@ union hpd_irq_data {
union lane_status lane01_status;/* 202h */
union lane_status lane23_status;/* 203h */
union lane_align_status_updated lane_status_updated;/* 204h */
- union sink_status sink_status;
+ union sink_status sink_status;/* 205h */
+ union link_service_irq_vector_esi0 link_service_irq_esi0;/* 2005h */
} bytes;
- uint8_t raw[6];
+ uint8_t raw[7];
};
union down_stream_port_count {
@@ -914,10 +953,30 @@ union dpia_info {
uint8_t raw;
};
+/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
+union usb4_driver_bw_cap {
+ struct {
+ uint8_t rsvd :7;
+ uint8_t driver_bw_alloc_support :1;
+ } bits;
+ uint8_t raw;
+};
+
+/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */
+union dpia_tunnel_info {
+ struct {
+ uint8_t group_id :3;
+ uint8_t rsvd :5;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
+ union usb4_driver_bw_cap driver_bw_cap;
+ union dpia_tunnel_info dpia_tunnel_info;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -959,6 +1018,15 @@ union dp_128b_132b_supported_lttpr_link_rates {
uint8_t raw;
};
+union dp_alpm_lttpr_cap {
+ struct {
+ uint8_t AUX_LESS_ALPM_SUPPORTED :1;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
union dp_sink_video_fallback_formats {
struct {
uint8_t dp_1024x768_60Hz_24bpp_support :1;
@@ -969,6 +1037,21 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dp_receive_port0_cap {
+ struct {
+ uint8_t RESERVED :1;
+ uint8_t LOCAL_EDID_PRESENT :1;
+ uint8_t ASSOCIATED_TO_PRECEDING_PORT:1;
+ uint8_t HBLANK_EXPANSION_CAPABLE :1;
+ uint8_t BUFFER_SIZE_UNIT :1;
+ uint8_t BUFFER_SIZE_PER_PORT :1;
+ uint8_t HBLANK_REDUCTION_CAPABLE :1;
+ uint8_t RESERVED2:1;
+ uint8_t BUFFER_SIZE:8;
+ } bits;
+ uint8_t raw[2];
+};
+
union dpcd_max_uncompressed_pixel_rate_cap {
struct {
uint16_t max_uncompressed_pixel_rate_cap :15;
@@ -1037,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1073,6 +1157,16 @@ struct dprx_states {
bool cable_id_written;
};
+union dpcd_panel_replay_capability_supported {
+ struct {
+ unsigned char PANEL_REPLAY_SUPPORT :1;
+ unsigned char SELECTIVE_UPDATE_SUPPORT :1;
+ unsigned char EARLY_TRANSPORT_SUPPORT :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_8BPC = 0,
DOWN_STREAM_MAX_10BPC,
@@ -1103,7 +1197,10 @@ struct dc_lttpr_caps {
uint8_t max_ext_timeout;
union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+ union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+ uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+ uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
};
struct dc_dongle_dfp_cap_ext {
@@ -1132,6 +1229,7 @@ struct dc_dongle_caps {
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
+ uint32_t dp_hdmi_regulated_autonomous_mode_support;
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
};
@@ -1192,7 +1290,12 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ union dpcd_panel_replay_capability_supported pr_caps_supported;
uint16_t edp_oled_emission_rate;
+ union dp_receive_port0_cap receive_port0_cap;
+ /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
+ uint8_t mso_cap_sst_links_supported;
+ uint8_t dp_edp_general_cap_2;
};
union dpcd_sink_ext_caps {
@@ -1254,11 +1357,38 @@ union dpcd_replay_configuration {
unsigned char raw;
};
+union panel_replay_enable_and_configuration_1 {
+ struct {
+ unsigned char PANEL_REPLAY_ENABLE :1;
+ unsigned char PANEL_REPLAY_CRC_ENABLE :1;
+ unsigned char IRQ_HPD_ASSDP_MISSING :1;
+ unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1;
+ unsigned char IRQ_HPD_RFB_ERROR :1;
+ unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1;
+ unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1;
+ unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1;
+ } bits;
+ unsigned char raw;
+};
+
+union panel_replay_enable_and_configuration_2 {
+ struct {
+ unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1;
+ unsigned char RESERVED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE :4;
+ unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1;
+ } bits;
+ unsigned char raw;
+};
+
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
@@ -1354,6 +1484,12 @@ struct dp_trace {
#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c
#endif
+#ifndef DP_LTTPR_ALPM_CAPABILITIES
+#define DP_LTTPR_ALPM_CAPABILITIES 0xF0009
+#endif
+#ifndef DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS
+#define DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS 0x303C
+#endif
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
@@ -1363,6 +1499,12 @@ struct dp_trace {
#ifndef DP_BRANCH_VENDOR_SPECIFIC_START
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
#endif
+#ifndef DP_LTTPR_IEEE_OUI
+#define DP_LTTPR_IEEE_OUI 0xF003D
+#endif
+#ifndef DP_LTTPR_DEVICE_ID
+#define DP_LTTPR_DEVICE_ID 0xF0040
+#endif
/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
#ifndef DP_TUNNELING_CAPABILITIES
#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
@@ -1400,4 +1542,26 @@ struct dp_trace {
#ifndef REQUESTED_BW
#define REQUESTED_BW 0xE0031 /* 1.4a */
#endif
+# ifndef DP_TUNNELING_BW_ALLOC_BITS_MASK
+# define DP_TUNNELING_BW_ALLOC_BITS_MASK (0x0F << 0)
+# endif
+# ifndef DP_TUNNELING_BW_REQUEST_FAILED
+# define DP_TUNNELING_BW_REQUEST_FAILED (1 << 0)
+# endif
+# ifndef DP_TUNNELING_BW_REQUEST_SUCCEEDED
+# define DP_TUNNELING_BW_REQUEST_SUCCEEDED (1 << 1)
+# endif
+# ifndef DP_TUNNELING_ESTIMATED_BW_CHANGED
+# define DP_TUNNELING_ESTIMATED_BW_CHANGED (1 << 2)
+# endif
+# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
+# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
+# endif
+# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
+# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
+# endif
+# ifndef DPTX_BW_ALLOC_MODE_ENABLE
+# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
+# endif
+
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9014c2409817..9d18f1c08079 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -94,6 +94,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps);
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing);
+
/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
* and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
* Hardware/specs limitation should not be writable by DM.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
new file mode 100644
index 000000000000..fee69642fb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dc_fused_io.h"
+
+#include "dm_helpers.h"
+#include "gpio.h"
+
+static bool op_i2c_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_i2c *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line,
+ bool over_aux
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = false;
+ loc->ddc_line = ddc_line;
+ loc->over_aux = over_aux;
+ loc->address = op->address;
+ loc->offset = op->offset;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool op_aux_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_aux *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = true;
+ loc->ddc_line = ddc_line;
+ loc->address = op->address;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool atomic_write_poll_read(
+ struct dc_link *link,
+ union dmub_rb_cmd commands[3],
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ const uint8_t count = 3;
+ const uint32_t timeout_per_request_us = 10000;
+ const uint32_t timeout_per_aux_transaction_us = 10000;
+ uint64_t timeout_us = 0;
+
+ commands[1].fused_io.request.poll_mask_msb = poll_mask_msb;
+ commands[1].fused_io.request.timeout_us = poll_timeout_us;
+
+ for (uint8_t i = 0; i < count; i++) {
+ struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
+ io->header.multi_cmd_pending = i != count - 1;
+ io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header);
+
+ timeout_us += timeout_per_request_us + io->request.timeout_us;
+ if (!io->request.timeout_us && io->request.u.aux.is_aux)
+ timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16);
+ }
+
+ if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us))
+ return false;
+
+ return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS;
+}
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const bool over_aux = false;
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux)
+ && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux)
+ && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line)
+ && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line)
+ && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
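A hedged caller-side sketch of the write/poll/read sequence implemented above, for the I2C variant; the device address, offsets, sizes, poll mask and timeout are illustrative, and the consume() call is hypothetical (see mod_hdcp.h for the exact definition of the op structs, whose data buffers must reference valid storage before the call):

    struct mod_hdcp_atomic_op_i2c write = { .address = 0x3a, .offset = 0x00, .size = 1 };
    struct mod_hdcp_atomic_op_i2c poll  = { .address = 0x3a, .offset = 0x70, .size = 1 };
    struct mod_hdcp_atomic_op_i2c read  = { .address = 0x3a, .offset = 0x80, .size = 16 };

    /* fill write.data with the payload to send before issuing the transaction */

    /* one fused DMUB transaction: write, poll offset 0x70 against poll_mask_msb, then read back */
    if (dm_atomic_write_poll_read_i2c(link, &write, &poll, &read, 100000 /* poll timeout, us */, 0x80))
        consume(read.data, read.size);   /* hypothetical consumer */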
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
new file mode 100644
index 000000000000..c74917240985
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DC_FUSED_IO_H__
+#define __DC_FUSED_IO_H__
+
+#include "dc.h"
+#include "mod_hdcp.h"
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+#endif // __DC_FUSED_IO_H__
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index b402be59b2c8..5a365bd19933 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
char *dce_version_to_string(const int version)
{
switch (version) {
+ case DCE_VERSION_6_0:
+ return "DCE 6.0";
+ case DCE_VERSION_6_1:
+ return "DCE 6.1";
+ case DCE_VERSION_6_4:
+ return "DCE 6.4";
case DCE_VERSION_8_0:
return "DCE 8.0";
case DCE_VERSION_8_1:
@@ -726,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
@@ -741,9 +747,16 @@ char *dce_version_to_string(const int version)
return "DCN 3.5";
case DCN_VERSION_3_51:
return "DCN 3.5.1";
+ case DCN_VERSION_3_6:
+ return "DCN 3.6";
case DCN_VERSION_4_01:
return "DCN 4.0.1";
default:
return "Unknown";
}
}
+
+bool dc_supports_vrr(const enum dce_version v)
+{
+ return v >= DCE_VERSION_8_0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c10567ec1c81..667852517246 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -68,7 +68,7 @@ enum dc_plane_addr_type {
struct dc_plane_address {
enum dc_plane_addr_type type;
- bool tmz_surface;
+ uint8_t tmz_surface;
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
@@ -341,89 +341,101 @@ enum swizzle_mode_addr3_values {
DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX
};
-union dc_tiling_info {
+enum dc_gfxversion {
+ DcGfxVersion7 = 0,
+ DcGfxVersion8,
+ DcGfxVersion9,
+ DcGfxVersion10,
+ DcGfxVersion11,
+ DcGfxAddr3,
+ DcGfxVersionUnknown
+};
- struct {
- /* Specifies the number of memory banks for tiling
- * purposes.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 2,4,8,16
- */
- unsigned int num_banks;
- /* Specifies the number of tiles in the x direction
- * to be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_width;
- unsigned int bank_width_c;
- /* Specifies the number of tiles in the y direction to
- * be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_height;
- unsigned int bank_height_c;
- /* Specifies the macro tile aspect ratio. Only applies
- * to 2D and 3D tiling modes.
- */
- unsigned int tile_aspect;
- unsigned int tile_aspect_c;
- /* Specifies the number of bytes that will be stored
- * contiguously for each tile.
- * If the tile data requires more storage than this
- * amount, it is split into multiple slices.
- * This field must not be larger than
- * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
- * Only applies to 2D and 3D tiling modes.
- * For color render targets, TILE_SPLIT >= 256B.
- */
- enum tile_split_values tile_split;
- enum tile_split_values tile_split_c;
- /* Specifies the addressing within a tile.
- * 0x0 - DISPLAY_MICRO_TILING
- * 0x1 - THIN_MICRO_TILING
- * 0x2 - DEPTH_MICRO_TILING
- * 0x3 - ROTATED_MICRO_TILING
- */
- enum tile_mode_values tile_mode;
- enum tile_mode_values tile_mode_c;
- /* Specifies the number of pipes and how they are
- * interleaved in the surface.
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- unsigned int pipe_config;
- /* Specifies the tiling mode of the surface.
- * THIN tiles use an 8x8x1 tile size.
- * THICK tiles use an 8x8x4 tile size.
- * 2D tiling modes rotate banks for successive Z slices
- * 3D tiling modes rotate pipes and banks for Z slices
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- enum array_mode_values array_mode;
- } gfx8;
+ struct dc_tiling_info {
+ unsigned int gfxversion; // Specifies which part of the union to use. Must use the dc_gfxversion enum
+ union {
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
- struct {
- enum swizzle_mode_values swizzle;
- unsigned int num_pipes;
- unsigned int max_compressed_frags;
- unsigned int pipe_interleave;
-
- unsigned int num_banks;
- unsigned int num_shader_engines;
- unsigned int num_rb_per_se;
- bool shaderEnable;
-
- bool meta_linear;
- bool rb_aligned;
- bool pipe_aligned;
- unsigned int num_pkrs;
- } gfx9;/*gfx9, gfx10 and above*/
- struct {
- enum swizzle_mode_addr3_values swizzle;
- } gfx_addr3;/*gfx with addr3 and above*/
+ struct {
+ enum swizzle_mode_values swizzle;
+ unsigned int num_pipes;
+ unsigned int max_compressed_frags;
+ unsigned int pipe_interleave;
+
+ unsigned int num_banks;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ bool shaderEnable;
+
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ unsigned int num_pkrs;
+ } gfx9;/*gfx9, gfx10 and above*/
+ struct {
+ enum swizzle_mode_addr3_values swizzle;
+ } gfx_addr3;/*gfx with addr3 and above*/
+ };
};
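With the old union now wrapped in struct dc_tiling_info, the new gfxversion field is the discriminator that tells consumers which union member is valid. A minimal sketch for a GFX9-style surface (the swizzle enumerator and the counts are placeholders, not values taken from this patch):

    struct dc_tiling_info tiling = {0};

    tiling.gfxversion = DcGfxVersion9;    /* selects the gfx9 member of the union */
    tiling.gfx9.swizzle = DC_SW_64KB_S;   /* placeholder swizzle_mode_values entry */
    tiling.gfx9.num_pipes = 4;
    tiling.gfx9.num_banks = 8;
    tiling.gfx9.pipe_interleave = 256;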
/* Rotation angle */
@@ -641,7 +653,8 @@ enum dc_color_space {
COLOR_SPACE_YCBCR709_LIMITED,
COLOR_SPACE_2020_RGB_FULLRANGE,
COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_2020_YCBCR_LIMITED,
+ COLOR_SPACE_2020_YCBCR_FULL,
COLOR_SPACE_ADOBERGB,
COLOR_SPACE_DCIP3,
COLOR_SPACE_DISPLAYNATIVE,
@@ -649,6 +662,7 @@ enum dc_color_space {
COLOR_SPACE_APPCTRL,
COLOR_SPACE_CUSTOMPOINTS,
COLOR_SPACE_YCBCR709_BLACK,
+ COLOR_SPACE_2020_YCBCR = COLOR_SPACE_2020_YCBCR_LIMITED,
};
enum dc_dither_option {
@@ -960,6 +974,7 @@ struct dc_crtc_timing {
uint32_t pix_clk_100hz;
uint32_t min_refresh_in_uhz;
+ uint32_t max_refresh_in_uhz;
uint32_t vic;
uint32_t hdmi_vic;
@@ -975,6 +990,9 @@ struct dc_crtc_timing {
struct dc_crtc_timing_flags flags;
uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */
struct dc_dsc_config dsc_cfg;
+
+ /* The number of pixels that HBlank has been expanded by from the original EDID timing. */
+ uint32_t expanded_hblank;
};
enum trigger_delay {
@@ -1000,6 +1018,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
uint32_t allow_otg_v_count_halt;
+ uint8_t timing_adjust_pending;
};
@@ -1085,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select {
enum mpcc_gamut_remap_id {
MPCC_OGAM_GAMUT_REMAP,
MPCC_MCM_FIRST_GAMUT_REMAP,
- MPCC_MCM_SECOND_GAMUT_REMAP
+ MPCC_MCM_SECOND_GAMUT_REMAP,
+ MPCC_RMCM_GAMUT_REMAP,
};
enum cursor_matrix_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index fabcefeda288..14feb843e694 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
+union dc_plane_status_update_flags {
+ struct {
+ uint32_t address : 1;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
-void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
- bool clear_tiling);
+void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
#endif /* _DC_PLANE_H_ */
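For illustration, a caller of the updated dc_plane_get_status() now passes the flags union; a minimal sketch, assuming the caller already holds a valid plane_state and that the address bit requests a refresh of the flip/address status:

	union dc_plane_status_update_flags flags = { .raw = 0 };
	const struct dc_plane_status *status;

	flags.bits.address = 1; /* assumed semantics: refresh the address/flip status */
	status = dc_plane_get_status(plane_state, flags);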
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 3518eb1b8cd1..37d1a79e8241 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -3,7 +3,6 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_translate.h"
-#include "spl/dc_spl_types.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn32/dcn32_dpp.h"
#include "dcn401/dcn401_dpp.h"
@@ -129,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -148,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->prefer_easf = false;
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
+ else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3)
+ spl_in->override_easf = true;
/* Translate adaptive sharpening preference */
unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
@@ -157,15 +158,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->adaptive_sharpness.enable = true;
spl_in->adaptive_sharpness.sharpness_level = 0;
} else if (sharpness_setting == SHARPNESS_CUSTOM) {
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+ /* SAT: read sharpness_range from dc_plane_state */
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid;
if (force_sharpness_level > 0) {
if (force_sharpness_level > 10)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index 1a12ef579ff4..1d9bae56ff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use(
const struct dc *dc,
const struct dc_state *state);
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_is_subvp_in_use(struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 713884103aea..321cfe92d799 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -44,6 +44,8 @@ struct mall_stream_config {
*/
enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream
+ bool subvp_limit_cursor_size; /* stream has used or is using subvp, which limits hw cursor support */
+ bool cursor_size_limit_subvp; /* stream is using a hw cursor config that prevents subvp */
};
struct dc_stream_status {
@@ -56,7 +58,7 @@ struct dc_stream_status {
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state *plane_states[MAX_SURFACES];
bool is_abm_supported;
struct mall_stream_config mall_stream_config;
bool fpo_in_use;
@@ -201,6 +203,7 @@ struct dc_stream_state {
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
struct dc_info_packet adaptive_sync_infopacket;
+ struct dc_info_packet avi_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -333,6 +336,8 @@ struct dc_stream_update {
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
struct dc_info_packet *adaptive_sync_infopacket;
+ struct dc_info_packet *avi_infopacket;
+
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
@@ -468,12 +473,11 @@ void dc_enable_stereo(
/* Triggers multi-stream synchronization. */
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
+ struct dc_stream_update *stream_update);
/**
* Create a new default stream for the requested sink
@@ -487,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
+struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
+const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
@@ -503,6 +507,11 @@ void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream);
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
@@ -528,27 +537,29 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
uint32_t *refresh_rate);
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- unsigned int *v_pos,
- unsigned int *nom_v_pos);
-
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
uint8_t phy_id,
bool is_stop);
+
+bool dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window,
+ uint8_t phy_id,
+ bool stop);
#endif
bool dc_stream_configure_crc(struct dc *dc,
struct dc_stream_state *stream,
struct crc_params *crc_window,
bool enable,
- bool continuous);
+ bool continuous,
+ uint8_t idx,
+ bool reset);
bool dc_stream_get_crc(struct dc *dc,
struct dc_stream_state *stream,
+ uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
@@ -570,11 +581,16 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- unsigned int *v_pos,
- unsigned int *nom_v_pos);
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one);
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream);
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc);
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
@@ -583,4 +599,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
struct dc_state *context);
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream);
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream);
+
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index edf4df1d03b5..f46039f64203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -76,7 +76,6 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
@@ -176,14 +175,20 @@ struct dc_panel_patch {
unsigned int embedded_tiled_slave;
unsigned int disable_fams;
unsigned int skip_avmute;
+ unsigned int skip_audio_sab_check;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
uint8_t blankstream_before_otg_off;
bool oled_optimize_display_on;
unsigned int force_mst_blocked_discovery;
+ unsigned int wait_after_dpcd_poweroff_ms;
};
+/**
+ * struct dc_edid_caps - Capabilities read from EDID.
+ * @analog: Whether the monitor is analog. Used by DVI-I handling.
+ */
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -210,6 +215,9 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
+ bool rr_capable;
+ bool scdc_present;
+ bool analog;
struct dc_panel_patch panel_patch;
};
@@ -262,6 +270,7 @@ enum dc_timing_source {
TIMING_SOURCE_EDID_4BYTE,
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
TIMING_SOURCE_EDID_CEA_RID,
+ TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
TIMING_SOURCE_VBIOS,
TIMING_SOURCE_CV,
TIMING_SOURCE_TV,
@@ -344,7 +353,8 @@ enum dc_connection_type {
dc_connection_none,
dc_connection_single,
dc_connection_mst_branch,
- dc_connection_sst_branch
+ dc_connection_sst_branch,
+ dc_connection_dac_load
};
struct dc_csc_adjustments {
@@ -560,6 +570,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -568,6 +584,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -613,6 +635,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -725,6 +748,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -875,6 +899,14 @@ struct dsc_dec_dpcd_caps {
bool is_dp; /* Decoded format */
};
+struct hblank_expansion_dpcd_caps {
+ bool expansion_supported;
+ bool reduction_supported;
+ bool buffer_unit_bytes; /* True: buffer size in bytes. False: buffer size in pixels */
+ bool buffer_per_port; /* True: buffer size per port. False: buffer size per lane */
+ uint32_t buffer_size; /* Add 1 to value and multiply by 32 */
+};
+
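As a worked example of the buffer_size encoding noted above: a raw DPCD value of 3 decodes to (3 + 1) * 32 = 128 bytes or pixels, depending on buffer_unit_bytes. A minimal decode helper might look like this (the helper name is illustrative, not part of the change):

	/* Illustrative: decode the raw DPCD HBlank buffer size field. */
	static inline uint32_t hblank_buffer_size_units(uint32_t raw_dpcd_value)
	{
		return (raw_dpcd_value + 1) * 32; /* bytes or pixels per buffer_unit_bytes */
	}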
struct dc_golden_table {
uint16_t dc_golden_table_ver;
uint32_t aux_dphy_rx_control0_val;
@@ -909,6 +941,12 @@ enum dc_psr_version {
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
+enum dc_replay_version {
+ DC_FREESYNC_REPLAY = 0,
+ DC_VESA_PANEL_REPLAY = 1,
+ DC_REPLAY_VERSION_UNSUPPORTED = 0XFF,
+};
+
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
@@ -932,10 +970,17 @@ enum backlight_control_type {
};
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#define MAX_CRC_WINDOW_NUM 2
+
struct otg_phy_mux {
uint8_t phy_output_num;
uint8_t otg_output_num;
};
+
+struct crc_window {
+ struct rect rect;
+ bool enable;
+};
#endif
enum dc_detect_reason {
@@ -1019,6 +1064,13 @@ struct psr_settings {
unsigned int psr_sdp_transmit_line_num_deadline;
uint8_t force_ffu_mode;
unsigned int psr_power_opt;
+
+ /**
+ * Some panels cannot handle the idle pattern during PSR entry,
+ * so power down the PHY before disabling the stream to avoid
+ * sending it.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
enum replay_coasting_vtotal_type {
@@ -1047,15 +1099,19 @@ enum replay_FW_Message_type {
Replay_Set_Residency_Frameupdate_Timer,
Replay_Set_Pseudo_VTotal,
Replay_Disabled_Adaptive_Sync_SDP,
+ Replay_Set_Version,
Replay_Set_General_Cmd,
};
union replay_error_status {
struct {
- unsigned char STATE_TRANSITION_ERROR :1;
- unsigned char LINK_CRC_ERROR :1;
- unsigned char DESYNC_ERROR :1;
- unsigned char RESERVED :5;
+ unsigned int STATE_TRANSITION_ERROR :1;
+ unsigned int LINK_CRC_ERROR :1;
+ unsigned int DESYNC_ERROR :1;
+ unsigned int RESERVED_3 :1;
+ unsigned int LOW_RR_INCORRECT_VTOTAL :1;
+ unsigned int NO_DOUBLED_RR :1;
+ unsigned int RESERVED_6_7 :2;
} bits;
unsigned char raw;
};
@@ -1064,7 +1120,8 @@ union replay_low_refresh_rate_enable_options {
struct {
//BIT[0-3]: Replay Low Hz Support control
unsigned int ENABLE_LOW_RR_SUPPORT :1;
- unsigned int RESERVED_1_3 :3;
+ unsigned int SKIP_ASIC_CHECK :1;
+ unsigned int RESERVED_2_3 :2;
//BIT[4-15]: Replay Low Hz Enable Scenarios
unsigned int ENABLE_STATIC_SCREEN :1;
unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
@@ -1078,6 +1135,8 @@ union replay_low_refresh_rate_enable_options {
};
struct replay_config {
+ /* Replay version */
+ enum dc_replay_version replay_version;
/* Replay feature is supported */
bool replay_supported;
/* Replay caps support DPCD & EDID caps*/
@@ -1102,6 +1161,16 @@ struct replay_config {
union replay_error_status replay_error_status;
/* Replay Low Hz enable Options */
union replay_low_refresh_rate_enable_options low_rr_enable_options;
+ /* Replay coasting vtotal is within low refresh rate range. */
+ bool low_rr_activated;
+ /* Replay low refresh rate supported */
+ bool low_rr_supported;
+ /* Replay Video Conferencing Optimization Enabled */
+ bool replay_video_conferencing_optimization_enabled;
+ /* Replay alpm mode */
+ enum dc_alpm_mode alpm_mode;
+ /* Replay full screen only */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
@@ -1124,12 +1193,20 @@ struct replay_settings {
uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Defer Update Coasting vtotal table */
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Skip frame number table */
+ uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM];
+ /* Defer skip frame number table */
+ uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
- /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
- uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay pseudo vtotal for low refresh rate */
+ uint16_t low_rr_full_screen_video_pseudo_vtotal;
/* Replay last pseudo vtotal set to DMUB */
uint16_t last_pseudo_vtotal;
+ /* Replay desync error */
+ uint32_t replay_desync_error_fail_count;
+ /* The frame skip number dal send to DMUB */
+ uint16_t frame_skip_number;
};
/* To split out "global" and "per-panel" config settings.
@@ -1162,6 +1239,7 @@ struct dc_panel_config {
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ bool read_psrcap_again;
unsigned int replay_enable_option;
} psr;
/* ABM */
@@ -1195,7 +1273,6 @@ struct dc_dpia_bw_alloc {
int bw_granularity; // BW Granularity
int dp_overhead; // DP overhead in dp tunneling
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
- bool response_ready; // Response ready from the CM side
uint8_t nrd_max_lane_count; // Non-reduced max lane count
uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
@@ -1242,7 +1319,7 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
struct dc_cm2_gpu_mem_parameters {
@@ -1251,6 +1328,7 @@ struct dc_cm2_gpu_mem_parameters {
struct dc_cm2_gpu_mem_format_parameters format_params;
enum dc_cm2_gpu_mem_pixel_component_order component_order;
enum dc_cm2_gpu_mem_size size;
+ uint16_t bit_depth;
};
enum dc_cm2_transfer_func_source {
@@ -1274,6 +1352,11 @@ struct dc_cm2_func_luts {
const struct dc_3dlut *lut3d_func;
struct dc_cm2_gpu_mem_parameters gpu_mem_params;
};
+ bool rmcm_3dlut_shaper_select;
+ bool mpc_3dlut_enable;
+ bool rmcm_3dlut_enable;
+ bool mpc_mcm_post_blend;
+ uint8_t rmcm_tmz;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
@@ -1331,4 +1414,19 @@ struct set_backlight_level_params {
uint8_t aux_inst;
};
+enum dc_validate_mode {
+ /* validate the mode and program HW */
+ DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
+ /* only validate the mode */
+ DC_VALIDATE_MODE_ONLY = 1,
+ /* validate the mode and get the max state (voltage level) */
+ DC_VALIDATE_MODE_AND_STATE_INDEX = 2,
+};
+
+struct dc_validation_dpia_set {
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *tunnel_settings;
+ uint32_t required_bw;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
index 5999b2da3a01..33d8bd91cb01 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
@@ -148,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 160c299419b7..8bdffd9ff31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -379,53 +379,117 @@ struct dccg_mask {
DCCG401_REG_FIELD_LIST(uint32_t)
};
+#define DCCG_REG_VARIABLE_LIST \
+ uint32_t DPPCLK_DTO_CTRL; \
+ uint32_t DPPCLK_DTO_PARAM[6]; \
+ uint32_t REFCLK_CNTL; \
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL; \
+ uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES]; \
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6]; \
+ uint32_t PHYASYMCLK_CLOCK_CNTL; \
+ uint32_t PHYBSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYCSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYDSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYESYMCLK_CLOCK_CNTL; \
+ uint32_t DTBCLK_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DTBCLK_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE; \
+ uint32_t DCCG_AUDIO_DTO_SOURCE; \
+ uint32_t DPSTREAMCLK_CNTL; \
+ uint32_t HDMISTREAMCLK_CNTL; \
+ uint32_t SYMCLK32_SE_CNTL; \
+ uint32_t SYMCLK32_LE_CNTL; \
+ uint32_t DENTIST_DISPCLK_CNTL; \
+ uint32_t DSCCLK_DTO_CTRL; \
+ uint32_t DSCCLK0_DTO_PARAM; \
+ uint32_t DSCCLK1_DTO_PARAM; \
+ uint32_t DSCCLK2_DTO_PARAM; \
+ uint32_t DSCCLK3_DTO_PARAM; \
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; \
+ uint32_t DPSTREAMCLK_GATE_DISABLE; \
+ uint32_t DCCG_GATE_DISABLE_CNTL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL2; \
+ uint32_t DCCG_GATE_DISABLE_CNTL3; \
+ uint32_t HDMISTREAMCLK0_DTO_PARAM; \
+ uint32_t DCCG_GATE_DISABLE_CNTL4; \
+ uint32_t OTG_PIXEL_RATE_DIV; \
+ uint32_t DTBCLK_P_CNTL; \
+ uint32_t DPPCLK_CTRL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL5; \
+ uint32_t DCCG_GATE_DISABLE_CNTL6; \
+ uint32_t DCCG_GLOBAL_FGCG_REP_CNTL; \
+ uint32_t SYMCLKA_CLOCK_ENABLE; \
+ uint32_t SYMCLKB_CLOCK_ENABLE; \
+ uint32_t SYMCLKC_CLOCK_ENABLE; \
+ uint32_t SYMCLKD_CLOCK_ENABLE; \
+ uint32_t SYMCLKE_CLOCK_ENABLE; \
+ uint32_t DP_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DP_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
+ uint32_t DCCG_AUDIO_DTO0_MODULE; \
+ uint32_t DCCG_AUDIO_DTO0_PHASE; \
+ uint32_t DCCG_AUDIO_DTO1_MODULE; \
+ uint32_t DCCG_AUDIO_DTO1_PHASE; \
+ uint32_t DCCG_CAC_STATUS; \
+ uint32_t DCCG_CAC_STATUS2; \
+ uint32_t DCCG_DISP_CNTL_REG; \
+ uint32_t DCCG_DS_CNTL; \
+ uint32_t DCCG_DS_DTO_INCR; \
+ uint32_t DCCG_DS_DTO_MODULO; \
+ uint32_t DCCG_DS_HW_CAL_INTERVAL; \
+ uint32_t DCCG_GTC_CNTL; \
+ uint32_t DCCG_GTC_CURRENT; \
+ uint32_t DCCG_GTC_DTO_INCR; \
+ uint32_t DCCG_GTC_DTO_MODULO; \
+ uint32_t DCCG_PERFMON_CNTL; \
+ uint32_t DCCG_PERFMON_CNTL2; \
+ uint32_t DCCG_SOFT_RESET; \
+ uint32_t DCCG_TEST_CLK_SEL; \
+ uint32_t DCCG_VSYNC_CNT_CTRL; \
+ uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
+ uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
+ uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DP_DTO_DBUF_EN; \
+ uint32_t DPIACLK_540M_DTO_MODULO; \
+ uint32_t DPIACLK_540M_DTO_PHASE; \
+ uint32_t DPIACLK_810M_DTO_MODULO; \
+ uint32_t DPIACLK_810M_DTO_PHASE; \
+ uint32_t DPIACLK_DTO_CNTL; \
+ uint32_t DPIASYMCLK_CNTL; \
+ uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CNTL; \
+ uint32_t DTBCLK_DTO_DBUF_EN; \
+ uint32_t FORCE_SYMCLK_DISABLE; \
+ uint32_t HDMICHARCLK0_CLOCK_CNTL; \
+ uint32_t MICROSECOND_TIME_BASE_DIV; \
+ uint32_t MILLISECOND_TIME_BASE_DIV; \
+ uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG0_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PIXEL_RATE_CNTL; \
+ uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
+ uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_PSP_CNTL
+
struct dccg_registers {
- uint32_t DPPCLK_DTO_CTRL;
- uint32_t DPPCLK_DTO_PARAM[6];
- uint32_t REFCLK_CNTL;
- uint32_t DISPCLK_FREQ_CHANGE_CNTL;
- uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES];
- uint32_t HDMICHARCLK_CLOCK_CNTL[6];
- uint32_t PHYASYMCLK_CLOCK_CNTL;
- uint32_t PHYBSYMCLK_CLOCK_CNTL;
- uint32_t PHYCSYMCLK_CLOCK_CNTL;
- uint32_t PHYDSYMCLK_CLOCK_CNTL;
- uint32_t PHYESYMCLK_CLOCK_CNTL;
- uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
- uint32_t DTBCLK_DTO_PHASE[MAX_PIPES];
- uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO;
- uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE;
- uint32_t DCCG_AUDIO_DTO_SOURCE;
- uint32_t DPSTREAMCLK_CNTL;
- uint32_t HDMISTREAMCLK_CNTL;
- uint32_t SYMCLK32_SE_CNTL;
- uint32_t SYMCLK32_LE_CNTL;
- uint32_t DENTIST_DISPCLK_CNTL;
- uint32_t DSCCLK_DTO_CTRL;
- uint32_t DSCCLK0_DTO_PARAM;
- uint32_t DSCCLK1_DTO_PARAM;
- uint32_t DSCCLK2_DTO_PARAM;
- uint32_t DSCCLK3_DTO_PARAM;
- uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
- uint32_t DPSTREAMCLK_GATE_DISABLE;
- uint32_t DCCG_GATE_DISABLE_CNTL;
- uint32_t DCCG_GATE_DISABLE_CNTL2;
- uint32_t DCCG_GATE_DISABLE_CNTL3;
- uint32_t HDMISTREAMCLK0_DTO_PARAM;
- uint32_t DCCG_GATE_DISABLE_CNTL4;
- uint32_t OTG_PIXEL_RATE_DIV;
- uint32_t DTBCLK_P_CNTL;
- uint32_t DPPCLK_CTRL;
- uint32_t DCCG_GATE_DISABLE_CNTL5;
- uint32_t DCCG_GATE_DISABLE_CNTL6;
- uint32_t DCCG_GLOBAL_FGCG_REP_CNTL;
- uint32_t SYMCLKA_CLOCK_ENABLE;
- uint32_t SYMCLKB_CLOCK_ENABLE;
- uint32_t SYMCLKC_CLOCK_ENABLE;
- uint32_t SYMCLKD_CLOCK_ENABLE;
- uint32_t SYMCLKE_CLOCK_ENABLE;
- uint32_t DP_DTO_MODULO[MAX_PIPES];
- uint32_t DP_DTO_PHASE[MAX_PIPES];
+ DCCG_REG_VARIABLE_LIST;
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 8664f0c4c9b7..97df04b7e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
+ dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
+ dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
+ dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
+ dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
+ dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
+ dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
+ dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
+ dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
+ dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
+ dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
+ dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
+ dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
+ dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
+ dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
+ dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
+ dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
+ dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
+ dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
+ dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
+ dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
+ dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
+ dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
+ dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
+ dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
+ dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
+ dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
+ dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
+ dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
+ dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
+ dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
+ dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
+ dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
+ dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
+ dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
+ dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
+ dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
+ dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
+ dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
+ dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
+ dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
+ dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
+ dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
+ dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
+ dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
+ dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
+ dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
+ dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
+ dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
+ dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
+ dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
+ dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
+ dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
+ dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
+ dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
+ dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
+ dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
+ dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
+ dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
+ dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
+ dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
+ dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
+ dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
+ dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
+ dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
+ dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
+ dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
+ dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
+ dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
+ dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
+ dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
+ dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
+ dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
+ dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
+ dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
+ dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
+ dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
+ dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
+ dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
+ dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
+ dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
+ dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
+ dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
+ dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
+}
+
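Since the new register dump is exposed through the dccg function table below, a debug-only caller would typically reach it as in this sketch (the helper shown is hypothetical):

	/* Hypothetical debug helper: snapshot the DCCG registers for a state dump. */
	static void dump_dccg_reg_state(struct dccg *dccg)
	{
		struct dcn_dccg_reg_state reg_state = {0};

		if (dccg->funcs->dccg_read_reg_state)
			dccg->funcs->dccg_read_reg_state(dccg, &reg_state);
	}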
static const struct dccg_funcs dccg31_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
@@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index cd261051dc2c..bf659920d4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
+
#endif //__DCN31_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
index 8f6edd8e9beb..ef3db6beba25 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
@@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
- .set_dtbclk_p_src = dccg314_set_dtbclk_p_src
+ .set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg314_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index 60ea1d248deb..a609635f35db 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -74,8 +74,7 @@
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
+ SR(DTBCLK_P_CNTL)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index b363f5360818..bd2f528137b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -39,6 +39,7 @@
#define CTX \
dccg_dcn->base.ctx
+#include "logger_types.h"
#define DC_LOGGER \
dccg->ctx->logger
@@ -133,30 +134,34 @@ enum dsc_clk_source {
};
-static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
+static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
static void dccg35_set_symclk32_se_rcg(
@@ -385,32 +390,34 @@ static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
}
}
-static void dccg35_set_dppclk_rcg(struct dccg *dccg,
- int inst, bool enable)
+static void dccg35_set_dppclk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
-
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
+
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1035,6 +1042,7 @@ static void dccg35_enable_dpp_clk_new(
DPPCLK0_DTO_MODULO, 0xFF);
}
+
static void dccg35_disable_dpp_clk_new(
struct dccg *dccg,
int inst)
@@ -1106,36 +1114,40 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
if (dispclk_rdivider_value != 0)
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
+static void dccg35_wait_for_dentist_change_done(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
uint32_t dpp_inst, uint32_t enable)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
}
+ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1163,41 +1175,50 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
ASSERT(false);
phase = 0xff;
}
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
- } else
+ } else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ }
+ udelay(10);
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
- uint32_t dpp_inst, uint32_t enable)
+ uint32_t dpp_inst, uint32_t disallow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && !disallow_rcg)
return;
+
switch (dpp_inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, disallow_rcg);
break;
default:
break;
}
+
+ /* Wait for clock to ramp */
+ if (disallow_rcg)
+ udelay(10);
}
static void dccg35_get_pixel_rate_div(
@@ -1289,6 +1310,8 @@ static void dccg35_set_pixel_rate_div(
BREAK_TO_DEBUGGER();
return;
}
+ if (otg_inst < 4)
+ dccg35_wait_for_dentist_change_done(dccg);
}
static void dccg35_set_dtbclk_p_src(
@@ -1396,7 +1419,11 @@ static void dccg35_set_dtbclk_dto(
* PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
* programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
- } else {
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -1421,6 +1448,8 @@ static void dccg35_set_dtbclk_dto(
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
}
}
@@ -1465,6 +1494,8 @@ static void dccg35_set_dpstreamclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
+ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
}
@@ -1504,6 +1535,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
+ __func__, dp_hpo_inst, enable ? 1 : 0);
}
@@ -1521,28 +1554,30 @@ static void dccg35_set_physymclk_root_clock_gating(
switch (phy_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
+
}
static void dccg35_set_physymclk(
@@ -1614,6 +1649,8 @@ static void dccg35_set_physymclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
+ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
}
static void dccg35_set_valid_pixel_rate(
@@ -1639,10 +1676,12 @@ static void dccg35_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
+
/* turn off the DTO and leave phase/modulo at max */
dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
@@ -1654,9 +1693,15 @@ static void dccg35_dpp_root_clock_control(
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
+ /* we have this in hwss: disable_plane */
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ /* Wait for clock to fully ramp */
+ udelay(10);
+
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
static void dccg35_disable_symclk32_se(
@@ -1715,6 +1760,7 @@ static void dccg35_disable_symclk32_se(
BREAK_TO_DEBUGGER();
return;
}
+
}
static void dccg35_init_cb(struct dccg *dccg)
@@ -1722,7 +1768,6 @@ static void dccg35_init_cb(struct dccg *dccg)
(void)dccg;
/* Any RCG should be done when driver enter low power mode*/
}
-
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -1737,6 +1782,8 @@ void dccg35_init(struct dccg *dccg)
for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
@@ -1749,6 +1796,8 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
/*
@@ -1771,41 +1820,44 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ udelay(10);
}
static void dccg35_disable_dscclk(struct dccg *dccg,
@@ -1813,9 +1865,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- return;
-
switch (inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
@@ -1852,6 +1901,9 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
default:
return;
}
+
+ /* Wait for clock ramp */
+ udelay(10);
}
static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
@@ -2337,10 +2389,7 @@ static void dccg35_disable_symclk_se_cb(
void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
{
-
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
- dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
- }
+ dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
}
static const struct dccg_funcs dccg35_funcs_new = {
@@ -2404,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 51f98c5c51c4..7b9c36456cd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -41,8 +41,9 @@
SR(SYMCLKA_CLOCK_ENABLE),\
SR(SYMCLKB_CLOCK_ENABLE),\
SR(SYMCLKC_CLOCK_ENABLE),\
- SR(SYMCLKD_CLOCK_ENABLE),\
- SR(SYMCLKE_CLOCK_ENABLE)
+ SR(SYMCLKD_CLOCK_ENABLE), \
+ SR(SYMCLKE_CLOCK_ENABLE),\
+ SR(SYMCLK_PSP_CNTL)
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -231,6 +232,14 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index d3e46c3cfa57..663a18ee5162 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -116,7 +116,7 @@ static void dccg401_wait_for_dentist_change_done(
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
}
-static void dccg401_get_pixel_rate_div(
+void dccg401_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
uint32_t *tmds_div,
@@ -154,7 +154,7 @@ static void dccg401_get_pixel_rate_div(
*tmds_div = val_tmds_div == 0 ? PIXEL_RATE_DIV_BY_2 : PIXEL_RATE_DIV_BY_4;
}
-static void dccg401_set_pixel_rate_div(
+void dccg401_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div tmds_div,
@@ -209,7 +209,7 @@ static void dccg401_set_pixel_rate_div(
}
-static void dccg401_set_dtbclk_p_src(
+void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst)
@@ -348,7 +348,7 @@ void dccg401_set_physymclk(
}
}
-static void dccg401_get_dccg_ref_freq(struct dccg *dccg,
+void dccg401_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
@@ -378,7 +378,7 @@ static void dccg401_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
-static void dccg401_enable_symclk32_le(
+void dccg401_enable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst,
enum phyd32clk_clock_source phyd32clk)
@@ -429,7 +429,7 @@ static void dccg401_enable_symclk32_le(
}
}
-static void dccg401_disable_symclk32_le(
+void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst)
{
@@ -531,7 +531,7 @@ static void dccg401_enable_dpstreamclk(struct dccg *dccg, int otg_inst, int dp_h
DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
}
-static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -574,7 +574,7 @@ static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
}
}
-static void dccg401_set_dpstreamclk(
+void dccg401_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -587,7 +587,7 @@ static void dccg401_set_dpstreamclk(
dccg401_enable_dpstreamclk(dccg, otg_inst, dp_hpo_inst);
}
-static void dccg401_set_dp_dto(
+void dccg401_set_dp_dto(
struct dccg *dccg,
const struct dp_dto_params *params)
{
@@ -619,7 +619,7 @@ static void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -763,7 +763,7 @@ static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
}
}
-static void dccg401_set_ref_dscclk(struct dccg *dccg,
+void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -798,7 +798,7 @@ static void dccg401_set_ref_dscclk(struct dccg *dccg,
}
}
-static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -834,7 +834,7 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
-static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
.enable_symclk_se = dccg401_enable_symclk_se,
.disable_symclk_se = dccg401_disable_symclk_se,
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg401_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index a196ce9e8127..5947a35363aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -193,11 +193,47 @@
void dccg401_init(struct dccg *dccg);
void dccg401_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
-
+void dccg401_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+void dccg401_set_dpstreamclk(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst);
+void dccg401_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+void dccg401_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst);
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h);
+void dccg401_set_ref_dscclk(struct dccg *dccg,
+ uint32_t dsc_inst);
void dccg401_set_src_sel(
struct dccg *dccg,
const struct dtbclk_dto_params *params);
-
+void dccg401_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused);
+void dccg401_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ uint32_t *tmds_div,
+ uint32_t *dp_dto_int);
+void dccg401_set_dp_dto(
+ struct dccg *dccg,
+ const struct dp_dto_params *params);
+void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+void dccg401_set_dtbclk_p_src(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ uint32_t otg_inst);
struct dccg *dccg401_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index a6006776333d..2dcf394edf22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index eeed840073fe..fcad61c618a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup(
REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
- REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO)
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bb4ac5042c80..673bb87d2c17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
- (unsigned int) payload->mot);
+ (unsigned int) payload->mot,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
@@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d "
+ "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
@@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot,
ret,
(int)operation_result,
- (unsigned int) *payload->reply);
+ (unsigned int) *payload->reply,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 077337698e0a..b4f5b4a6331a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- // Apply ssed(spread spectrum) dpref clock for edp only.
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
- && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
- && encoding == DP_8b_10b_ENCODING)
+ // Apply SS (spread spectrum) dpref clock for eDP and DP
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 &&
+ dc_is_dp_signal(pix_clk_params->signal_type) &&
+ encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
+
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 0721ae895ae9..94128f7a18b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -257,7 +257,7 @@ bool dce110_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
- enum clock_source_id,
+ enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a8e79104b684..5f8fba45d98d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index d28826c3ae5f..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -292,9 +292,35 @@ static void set_speed(
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
+static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ switch (arbitrate) {
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
+ return true;
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
+ return false;
+ case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
+ default:
+ break;
+ }
+
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return false;
+
+ return true;
+}
+
static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
uint32_t reset_length = 0;
+
+ // Deassert soft reset to unblock I2C engine registers
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
@@ -309,8 +335,8 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 1,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+ if (!acquire_engine(dce_i2c_hw))
+ return false;
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
@@ -319,9 +345,8 @@ static bool setup_engine(
i2c_setup_limit = dce_i2c_hw->setup_limit;
/* Program pin select */
- REG_UPDATE_6(DC_I2C_CONTROL,
+ REG_UPDATE_5(DC_I2C_CONTROL,
DC_I2C_GO, 0,
- DC_I2C_SOFT_RESET, 0,
DC_I2C_SEND_RESET, 0,
DC_I2C_SW_STATUS_RESET, 1,
DC_I2C_TRANSACTION_COUNT, 0,
@@ -351,6 +376,32 @@ static bool setup_engine(
return true;
}
+/**
+ * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state
+ * @dce_i2c_hw: Pointer to dce_i2c_hw structure
+ *
+ * If we boot without an HDMI display, the I2C engine does not get initialized
+ * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
+ * acquire. After setting SW_DONE_USING_I2C on release, the engine gets
+ * immediately reacquired by SW, preventing DMUB from using it.
+ *
+ * This function checks the I2C arbitration status and applies a release
+ * workaround if necessary.
+ */
+static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return;
+
+ // Still acquired after release, release again as a workaround
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
+}
+
static void release_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
@@ -378,9 +429,9 @@ static void release_engine(
/*for HW HDCP Ri polling failure w/a test*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
- /* Release I2C after reset, so HW or DMCU could use it */
- REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
- DC_I2C_SW_USE_I2C_REG_REQ, 0);
+ // Release the I2C engine so HW or DMCU can use it; this automatically clears SW_USE_I2C
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ cntl_stuck_hw_workaround(dce_i2c_hw);
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
@@ -540,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4a9d07c31bc5..87dbb8d7ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -302,6 +302,10 @@ static void setup_panel_mode(
if (ctx->dc->caps.psp_setup_panel_mode)
return;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
return true;
}
+static bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* When the VBIOS doesn't specify any limits, use 400 MHz.
+ * The value comes from amdgpu_atombios_get_clock_info.
+ */
+ uint32_t max_pixel_clock_khz = 400000;
+
+ if (enc110->base.ctx->dc_bios->fw_info_valid &&
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
+ max_pixel_clock_khz =
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
+ }
+
+ if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
+ return false;
+
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
+ return false;
+
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ return true;
+}
+
void dce110_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -896,13 +940,13 @@ void dce110_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
@@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
is_valid = dce110_link_encoder_validate_dp_output(
enc110, &stream->timing);
break;
+ case SIGNAL_TYPE_RGB:
+ is_valid = dce110_link_encoder_validate_rgb_output(
+ enc110, &stream->timing);
+ break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
@@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
@@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
+ case SIGNAL_TYPE_RGB:
+ break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
@@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
+ switch (enc->analog_engine) {
+ case ENGINE_ID_DACA:
+ REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
+ break;
+ case ENGINE_ID_DACB:
+ /* DACB doesn't seem to be present on DCE6+,
+ * although there are references to it in the register file.
+ */
+ DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
+ break;
+ default:
+ break;
+ }
+
+ /* The code below only applies to connectors that support digital signals. */
+ if (enc->transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
@@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -1798,13 +1883,13 @@ void dce60_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 261c70e01e33..c58b69bc319b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -101,18 +101,21 @@
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id)
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SR(DAC_ENABLE)
#endif
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- LE_COMMON_REG_LIST_BASE(id)
+ LE_COMMON_REG_LIST_BASE(id), \
+ SR(DAC_ENABLE)
#define LE_DCE100_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SR(DCI_MEM_PWR_STATUS)
+ SR(DCI_MEM_PWR_STATUS), \
+ SR(DAC_ENABLE)
#define LE_DCE110_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
@@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+
+ /* DAC registers */
+ uint32_t DAC_ENABLE;
};
struct dce110_link_encoder {
@@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
-bool dce110_link_encoder_validate_rgb_output(
- const struct dce110_link_encoder *enc110,
- const struct dc_crtc_timing *crtc_timing);
-
bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index ebd174be5786..1c2009e38aa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -98,7 +98,7 @@ static enum mi_bits_per_pixel get_mi_bpp(
}
static enum mi_tiling_format get_mi_tiling(
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
switch (tiling_info->gfx8.array_mode) {
case DC_ARRAY_1D_TILED_THIN1:
@@ -133,7 +133,7 @@ static bool is_vert_scan(enum dc_rotation_angle rotation)
static void dce_mi_program_pte_vm(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
@@ -430,7 +430,7 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
}
static void program_tiling(
- struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+ struct dce_mem_input *dce_mi, const struct dc_tiling_info *info)
{
if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
REG_UPDATE_6(GRPH_CONTROL,
@@ -650,7 +650,7 @@ static void dce_mi_clear_tiling(
static void dce_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -670,7 +670,7 @@ static void dce_mi_program_surface_config(
static void dce60_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation, /* not used in DCE6 */
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index d199e4ed2e59..574618d5d4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -418,7 +418,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
dynamic_range_rgb = 1; /*limited range*/
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -430,6 +430,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
+ default:
/* do nothing */
break;
}
@@ -1566,3 +1567,17 @@ void dce110_stream_encoder_construct(
enc110->se_shift = se_shift;
enc110->se_mask = se_mask;
}
+
+static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {};
+
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id)
+{
+ enc110->base.funcs = &dce110_an_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index cc5020a8e1e1..068de1392121 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
const struct dce_stream_encoder_shift *se_shift,
const struct dce_stream_encoder_mask *se_mask);
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id);
void dce110_se_audio_mute_control(
struct stream_encoder *enc, bool mute);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 2b1673d69ea8..1ab5ae9b5ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) {
- /* Set bypass */
-
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ /* Disable scaler functionality */
+ REG_WRITE(SCL_SCALER_ENABLE, 0);
+ /* Clear registers that can cause glitches even when the scaler is off */
+ REG_WRITE(SCL_TAP_CONTROL, 0);
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
return false;
}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
REG_SET(DC_LB_MEM_SIZE, 0,
DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ REG_WRITE(SCL_UPDATE, 0x00010000);
+
/* Clear SCL_F_SHARP_CONTROL value to 0 */
REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
/* 4. Program vertical filters */
if (xfm_dce->filter_v == NULL)
- REG_SET(SCL_VERT_FILTER_CONTROL, 0,
- SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.v_taps,
@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */
if (xfm_dce->filter_h == NULL)
- REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
- SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.h_taps,
@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+ REG_WRITE(SCL_UPDATE, 0);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index cbce194ec7b8..eb716e8337e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -155,6 +155,9 @@
SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
SRI(VIEWPORT_START, SCL, id), \
SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_SCALER_ENABLE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_INIT, SCL, id), \
@@ -590,6 +593,7 @@ struct dce_transform_registers {
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_SCALER_ENABLE;
uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index ccc154b0281c..3b9011ef9b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..5bfa2b0d2afd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -61,13 +61,49 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
-bool should_use_dmub_lock(struct dc_link *link)
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link)
{
+ if (!link)
+ return false;
+
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
if (link->replay_settings.replay_feature_enabled)
return true;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num == 1)
+ return true;
+ }
+ return false;
+}
+
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context)
+{
+ if (!context)
+ return false;
+ for (int i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (dmub_hw_lock_mgr_does_link_require_lock(dc, link))
+ return true;
+ }
return false;
}
+
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link)
+{
+ /* ASIC doesn't support DMUB */
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ return false;
+
+ return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 5a72b168fb4a..4c80ca8484ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
-bool should_use_dmub_lock(struct dc_link *link);
+/**
+ * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox1 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 88c75c243bf8..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
@@ -418,6 +418,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
+
+ copy_settings_data->power_down_phy_before_disable_stream =
+ link->psr_settings.power_down_phy_before_disable_stream;
+
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..cf1372aaff6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link_service.h"
#include "dc_dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
@@ -168,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode;
copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
@@ -189,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
@@ -199,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
*/
static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
uint32_t coasting_vtotal,
- uint8_t panel_inst)
+ uint8_t panel_inst,
+ uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -213,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -269,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal)
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -280,11 +296,14 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -319,7 +338,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +351,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +366,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,19 +379,34 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
break;
+ case Replay_Set_Version:
+ //Header
+ cmd.replay_set_version.header.sub_type =
+ DMUB_CMD__REPLAY_SET_VERSION;
+ cmd.replay_set_version.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_version) -
+ sizeof(struct dmub_cmd_header);
+ //Cmd Body
+ cmd.replay_set_version.replay_set_version_data.panel_inst =
+ cmd_element->version_data.panel_inst;
+ cmd.replay_set_version.replay_set_version_data.version =
+ cmd_element->version_data.version;
+ break;
case Replay_Set_General_Cmd:
//Header
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..07c79739a980 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -27,11 +27,12 @@ struct dmub_replay_funcs {
void (*replay_send_cmd)(struct dmub_replay *dmub,
enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal,
- uint8_t panel_inst);
+ uint8_t panel_inst, uint16_t frame_skip_number);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode);
void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal);
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal,
+ uint16_t frame_skip_number);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index d241ee13b293..59a0961b49da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -409,19 +409,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
*compressor = NULL;
}
-void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
-{
- *max_x = FBC_MAX_X;
- *max_y = FBC_MAX_Y;
-
- /* if (m_smallLocalFrameBufferMemory == 1)
- * {
- * *max_x = FBC_MAX_X_SG;
- * *max_y = FBC_MAX_Y_SG;
- * }
- */
-}
-
static const struct compressor_funcs dce110_compressor_funcs = {
.power_up_fbc = dce110_compressor_power_up_fbc,
.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
index 26c7335a1cbf..223c57941e92 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -75,7 +75,5 @@ void dce110_compressor_program_lpt_control(struct compressor *cp,
bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
-void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 8a3fbf95c48f..2c43c2422638 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -162,7 +162,7 @@ static void enable(struct dce_mem_input *mem_input110)
static void program_tiling(
struct dce_mem_input *mem_input110,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
uint32_t value = 0;
@@ -523,7 +523,7 @@ static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
/* Helper to get table entry from surface info */
static const unsigned int *get_dvmm_hw_setting(
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum surface_pixel_format format,
bool chroma)
{
@@ -563,7 +563,7 @@ static const unsigned int *get_dvmm_hw_setting(
static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
@@ -636,7 +636,7 @@ static void dce_mem_input_v_program_pte_vm(
static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index fa422a8cbced..61b0807693fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2127,70 +2127,131 @@ bool dce110_configure_crc(struct timing_generator *tg,
cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
- /* First, disable CRC before we configure it. */
- dm_write_reg(tg->ctx, cntl_addr, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg(tg->ctx, cntl_addr, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
- set_reg_field_value(value, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START);
- set_reg_field_value(value, params->windowa_x_end,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window A y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
- set_reg_field_value(value, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START);
- set_reg_field_value(value, params->windowa_y_end,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
- set_reg_field_value(value, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START);
- set_reg_field_value(value, params->windowb_x_end,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
- set_reg_field_value(value, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START);
- set_reg_field_value(value, params->windowb_y_end,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- value = 0;
- set_reg_field_value(value, params->continuous_mode ? 1 : 0,
- CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
- set_reg_field_value(value, params->selection,
- CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
- set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
- dm_write_reg(tg->ctx, cntl_addr, value);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable.*/
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable.*/
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC1_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ default:
+ return false;
+ }
return true;
}
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t addr = 0;
@@ -2206,14 +2267,30 @@ bool dce110_get_crc(struct timing_generator *tg,
if (!field)
return false;
- addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
- value = dm_read_reg(tg->ctx, addr);
- *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
- addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
- value = dm_read_reg(tg->ctx, addr);
- *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index ee4de740aceb..e4f5cad64f32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -286,7 +286,7 @@ bool dce110_arm_vert_intr(
bool dce110_configure_crc(struct timing_generator *tg,
const struct crc_params *params);
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
bool dce110_is_two_pixels_per_container(const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index fcf59348eb62..31c4f44ceaac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1100,45 +1100,79 @@ static bool dce120_configure_crc(struct timing_generator *tg,
if (!dce120_is_tg_enabled(tg))
return false;
- /* First, disable CRC before we configure it. */
- dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
- tg110->offsets.crtc, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
- CRTC_CRC_EN, params->continuous_mode ? 1 : 0,
- CRTC_CRC0_SELECT, params->selection,
- CRTC_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC0_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC1_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
-static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
- uint32_t *g_y, uint32_t *b_cb)
+static bool dce120_get_crc(struct timing_generator *tg, uint8_t idx,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value, field;
@@ -1151,14 +1185,30 @@ static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
if (!field)
return false;
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
- tg110->offsets.crtc);
- *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
- tg110->offsets.crtc);
- *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
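
Note: the DCE110/DCE120 hunks above extend the CRC hooks so callers pick a CRC engine instance — crc_params.crc_eng_inst selects which window/control registers configure_crc programs, and the new idx argument to get_crc selects whether the CRC0 or CRC1 data registers are read back. A minimal sketch of how a caller might drive both hooks through the timing_generator function table, assuming the usual dc headers (struct timing_generator, struct crc_params) are in scope; the helper name and all field values are illustrative only:

	static bool crc_check_engine1(struct timing_generator *tg)
	{
		struct crc_params params = {0};
		uint32_t r_cr, g_y, b_cb;

		params.enable = true;
		params.continuous_mode = true;
		params.selection = 0;		/* illustrative CRC source selection */
		params.crc_eng_inst = 1;	/* program the second engine (CRC1) */
		params.windowa_x_end = 1920;	/* example window covering a 1080p frame */
		params.windowa_y_end = 1080;

		if (!tg->funcs->configure_crc(tg, &params))
			return false;

		/* idx must match the engine configured above */
		return tg->funcs->get_crc(tg, 1, &r_cr, &g_y, &b_cb);
	}

The read side only returns data once CRC generation is actually enabled, which is why both get_crc implementations check the CRTC_CRC_EN field before touching the data registers.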
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index eede83ad91fa..824f73eb3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -25,8 +25,7 @@
CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
-DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
- dce60_resource.o
+DCE60 = dce60_timing_generator.o
AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 003a9330c286..88e7a1fc9a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
dm_write_reg(tg->ctx, addr, value);
}
-static void program_timing(struct timing_generator *tg,
+static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
int vready_offset,
int vstartup_start,
@@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request(
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
- .program_timing = program_timing,
+ .program_timing = dce80_timing_generator_program_timing,
.enable_crtc = dce110_timing_generator_enable_crtc,
.disable_crtc = dce110_timing_generator_disable_crtc,
.is_counter_moving = dce110_timing_generator_is_counter_moving,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 88cf47a5ea75..baf663b661c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -429,7 +429,9 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
struct dcn_otg_state s = {0};
int pix_clk = 0;
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
+
pix_clk = dc->current_state->res_ctx.pipe_ctx[i].stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
//only print if OTG master is enabled
@@ -495,7 +497,8 @@ static void dcn10_clear_otpc_underflow(struct dc *dc)
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
if (s.otg_enabled & 1)
tg->funcs->clear_optc_underflow(tg);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
index e0558a78b11c..1c1228116487 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
@@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
- is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index d01a8b8f9595..d928b4dcf6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
@@ -391,7 +391,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -404,6 +404,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
index 0b47aeb60e79..bec0b4aaeb2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
index 425b830b88d2..e93be7b6d9b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
@@ -47,7 +47,7 @@
enc1->base.ctx
-static void enc3_update_hdmi_info_packet(
+void enc3_update_hdmi_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
const struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
index 06310973ded2..830ce7e47035 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
@@ -322,6 +322,10 @@ void enc3_dp_set_dsc_pps_info_packet(
struct stream_encoder *enc,
bool enable,
uint8_t *dsc_packed_pps,
- bool immediate_update);
+ bool immediate_update);
+void enc3_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet);
#endif /* __DC_DIO_STREAM_ENCODER_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index b2cea59ba5d4..84cc2ddc52fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -37,7 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
-#include "link.h"
+#include "link_service.h"
#define CTX \
enc10->base.ctx
@@ -653,8 +653,9 @@ void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_lin
if (!query_dp_alt_from_dmub(enc, &cmd))
return;
- if (cmd.query_dp_alt.data.is_usb &&
- cmd.query_dp_alt.data.is_dp4 == 0)
+ if (cmd.query_dp_alt.data.is_dp_alt_disable == 0 &&
+ cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index ae81451a3a72..3e85e9c3d2cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..3523d1cdc1a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index d4a3e811aa39..9972911330b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -28,6 +28,7 @@
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn35_dio_link_encoder.h"
+#include "dc_dmub_srv.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
@@ -136,9 +137,9 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.hw_init = dcn35_link_encoder_init,
.setup = dcn35_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
- .enable_dp_output = dcn31_link_encoder_enable_dp_output,
- .enable_dp_mst_output = dcn31_link_encoder_enable_dp_mst_output,
- .disable_output = dcn31_link_encoder_disable_output,
+ .enable_dp_output = dcn35_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn35_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn35_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
@@ -159,6 +160,8 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
+ .enable_dpia_output = dcn35_link_encoder_enable_dpia_output,
+ .disable_dpia_output = dcn35_link_encoder_disable_dpia_output,
};
void dcn35_link_encoder_construct(
@@ -265,3 +268,124 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
+
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_disable_output(enc, signal);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_disable_output(enc, signal);
+ }
+}
+
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin unused by DPIA. */
+ dpia_control.hpdsel = 6;
+ dpia_control.dpia_id = dpia_id;
+ dpia_control.fec_rdy = fec_rdy;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+}
+
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
+ return;
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.dpia_id = dpia_id;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
+}
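
Note: the new DPIA paths hand transmitter programming to DMUB via a DMUB_CMD__DPIA_DIG1_DPIA_CONTROL command instead of writing DIG registers directly. One detail worth spelling out is the symbol-clock conversion in dcn35_link_encoder_enable_dpia_output(): link_settings->link_rate is the raw DP rate code in multiples of 0.27 Gbps, so multiplying by the 27 MHz reference (in kHz) gives the symbol clock in kHz, and dividing by 10 converts to the 10 kHz units the command expects. A small standalone sketch of that arithmetic, assuming LINK_RATE_REF_FREQ_IN_KHZ is 27000 as defined elsewhere in DC:

	#include <stdint.h>
	#include <stdio.h>

	#define LINK_RATE_REF_FREQ_IN_KHZ 27000 /* assumption: 0.27 Gbps per rate-code unit */

	static uint32_t dpia_symclk_10khz(uint8_t link_rate_code)
	{
		/* rate code 0x14 (HBR2, 5.4 Gbps) -> 0x14 * 27000 / 10 = 54000 -> 540 MHz */
		return (uint32_t)link_rate_code * LINK_RATE_REF_FREQ_IN_KHZ / 10;
	}

	int main(void)
	{
		printf("%u\n", dpia_symclk_10khz(0x14)); /* prints 54000 */
		return 0;
	}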
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index d546a3676304..5712e6553fab 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -144,4 +144,45 @@ bool dcn35_is_dig_enabled(struct link_encoder *enc);
enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
+/*
+ * Enable DP transmitter and its encoder.
+ */
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Enable DP transmitter and its encoder in MST mode.
+ */
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Disable transmitter and its encoder.
+ */
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/*
+ * Enable DP transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+
+/*
+ * Disable transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
+
#endif /* __DC_LINK_ENCODER__DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6ab2a218b769..fd5d1dbf9dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn35_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
uint32_t reset_val;
REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
- return (reset_val == 0) ? false : true;
+ return reset_val != 0;
}
void enc35_disable_fifo(struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 098c2a01a850..99aab70ef3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -32,7 +32,7 @@
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -60,7 +60,7 @@ static void enc401_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-static void enc401_stream_encoder_dvi_set_stream_attribute(
+void enc401_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -100,7 +100,7 @@ static void enc401_stream_encoder_dvi_set_stream_attribute(
}
/* setup stream encoder in hdmi mode */
-static void enc401_stream_encoder_hdmi_set_stream_attribute(
+void enc401_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
@@ -229,7 +229,7 @@ static void enc401_stream_encoder_hdmi_set_stream_attribute(
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}
-static void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
+void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -260,7 +260,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
-static void enc401_stream_encoder_dp_unblank(
+void enc401_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
@@ -376,7 +376,7 @@ static void enc401_stream_encoder_dp_unblank(
/* this function reads DSC-related register fields to be logged later in dcn10_log_hw_state
 * into a dcn_dsc_state struct.
*/
-static void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
+void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -394,7 +394,7 @@ static void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc401_stream_encoder_enable(
+void enc401_stream_encoder_enable(
struct stream_encoder *enc,
enum signal_type signal,
bool enable)
@@ -632,7 +632,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -645,6 +645,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
@@ -704,7 +705,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}
-static void enc401_stream_encoder_map_to_link(
+void enc401_stream_encoder_map_to_link(
struct stream_encoder *enc,
uint32_t stream_enc_inst,
uint32_t link_enc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
index d751839598f8..d6b00cd246b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
@@ -214,4 +214,27 @@ void enc401_stream_encoder_dp_set_stream_attribute(
enum dc_color_space output_color_space,
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
+void enc401_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+void enc401_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+void enc401_stream_encoder_enable(
+ struct stream_encoder *enc,
+ enum signal_type signal,
+ bool enable);
+void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container);
+void enc401_stream_encoder_map_to_link(
+ struct stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+void enc401_read_state(struct stream_encoder *enc, struct enc_state *s);
+void enc401_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
#endif /* __DC_DIO_STREAM_ENCODER_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 2e4a46f1b499..9d160b39e8c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -153,11 +153,24 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
bool enable
);
+
+bool dm_helpers_dp_write_hblank_reduction(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index f81e5a4e1d6d..fbbf9c757b3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
/*
* SMU message tracing
*/
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
-
-#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
-#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx);
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx);
+#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx)
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx)
/*
* DMUB Interfaces
@@ -291,6 +292,13 @@ bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, e
bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
/*
+ * ACPI Interfaces
+ */
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params);
+
+/*
* Debug and verification hooks
*/
@@ -304,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx,
char *dce_version_to_string(const int version);
+bool dc_supports_vrr(const enum dce_version v);
+
#endif /* __DM_SERVICES_H__ */
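
Note: the SMU tracing rework above splits the old one-shot dm_trace_smu_msg()/dm_trace_smu_delay() pair into enter/exit hooks, so the DM layer can record both the request and its outcome (success flag plus response word). A rough standalone illustration of the pairing; the stub functions below are stand-ins for the real DM hooks, not their implementation:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in,
				       unsigned int delay, void *ctx)
	{
		(void)ctx;
		printf("SMU enter: msg=%u param=0x%x delay=%u\n", msg_id, param_in, delay);
	}

	static void dm_trace_smu_exit(bool success, uint32_t response, void *ctx)
	{
		(void)ctx;
		printf("SMU exit: ok=%d response=0x%x\n", success, response);
	}

	int main(void)
	{
		/* TRACE_SMU_MSG_ENTER(...) before the request is posted */
		dm_trace_smu_enter(42, 0x100, 0, NULL);

		/* ... poll for the SMU response (elided) ... */

		/* TRACE_SMU_MSG_EXIT(...) records whether it succeeded and the reply */
		dm_trace_smu_exit(true, 0x1, NULL);
		return 0;
	}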
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index facf269c4326..3b093b8699ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
struct dc_link_settings link_settings; /* DP only */
};
@@ -275,4 +275,30 @@ enum dm_dmub_wait_type {
DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY,
};
+enum dm_acpi_transition_link_type {
+ hdmi_tmds,
+ hdmi_frl,
+ dp_8b_10b,
+ dp_128b_132b,
+ none
+};
+
+struct dm_process_phy_transition_init_params {
+ uint32_t phy_id;
+ uint8_t action;
+ uint32_t sym_clock_10khz;
+ enum signal_type signal;
+ enum dc_lane_count display_port_lanes_count;
+ enum dc_link_rate display_port_link_rate;
+ uint32_t transition_bitmask;
+ uint8_t hdmi_frl_num_lanes;
+};
+
+struct dm_process_phy_transition_input_params {
+ uint32_t phy_id;
+ uint32_t transition_id;
+ uint32_t phy_configuration;
+ uint32_t data_rate;
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 46f9c05de16e..b357683b4255 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_limit := 3072
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
@@ -110,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
-
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
@@ -133,7 +134,6 @@ DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
-DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1235bf9a596..74962791302f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
/*
* we want a breakdown of the various stages of validation, which the
@@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth(
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
+ if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
@@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth(
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
- } else if (fast_validate) {
+ } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
}
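
Note: this file, and the dcn20/dcn21 FPU code below, migrates the bandwidth-validation entry points from a bool fast_validate flag to an enum dc_validate_mode, with DC_VALIDATE_MODE_AND_PROGRAMMING taking the place of the old full path (fast_validate == false). A minimal sketch of the shape of that change; only DC_VALIDATE_MODE_AND_PROGRAMMING is confirmed by this patch, the other enumerator is a placeholder:

	#include <stdbool.h>

	enum dc_validate_mode {
		DC_VALIDATE_MODE_ONLY,			/* placeholder name for the former fast path */
		DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full validation; watermarks and DLG get programmed */
	};

	static bool validate_bandwidth(enum dc_validate_mode validate_mode)
	{
		bool voltage_supported = true;	/* stand-in for the DML voltage-level search result */

		/* mirrors the old "if (fast_validate)" early-out */
		if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
			return voltage_supported;

		/* full mode only: calculate watermarks and DLG parameters here */
		return voltage_supported;
	}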
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index e9fea9c2162e..7aaf13bbd4e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,8 +30,7 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
-
-#include "link.h"
+#include "link_service.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"
@@ -1315,7 +1314,7 @@ static void swizzle_to_dml_params(
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1733,7 +1732,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
@@ -1780,10 +1779,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2027,7 +2026,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2040,7 +2039,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_COUNT();
- out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2050,12 +2049,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -2077,7 +2076,7 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
@@ -2095,12 +2094,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
- }
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes);
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
@@ -2113,7 +2111,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
@@ -2156,14 +2154,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
@@ -2239,7 +2237,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
@@ -2281,10 +2279,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2319,7 +2317,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
}
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2337,7 +2335,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2347,12 +2345,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index b6c34198ddc8..aed00039ca62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
@@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc,
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
@@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
- fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum
+ dc_validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
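
Note: the display_rq_dlg_calc hunks in this series replace bitwise '&' with logical '&&' in a boolean condition. With bool and comparison operands the two yield the same value, so this is a correctness-of-intent and static-analysis fix rather than a behaviour change; '&&' also short-circuits. A standalone illustration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool surf_linear = false, surf_vert = true;
		int log2_dpte_req_height_ptes = 0;

		/* '&' evaluates every operand and mixes integer with boolean semantics;
		 * '&&' short-circuits and is the idiomatic form for a logical condition.
		 * Both produce the same value here. */
		bool bitwise = !surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert;
		bool logical = !surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert;

		printf("%d %d\n", bitwise, logical); /* prints: 1 1 */
		return 0;
	}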
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index aac0a0ae2966..e5f5c0663750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -178,82 +178,6 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
};
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- double vtotal_min, vtotal_max;
- double ratio, modulo, phase;
- uint32_t vblank_start;
- uint32_t v_total_mask_value = 0;
-
- dc_assert_fp_enabled();
-
- /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
- * VOTAL_MAX - VTOTAL_MIN = 1
- */
- v_total_mask_value = 16;
- vtotal_min = dcn_bw_floor(vtotal_avg);
- vtotal_max = dcn_bw_ceil(vtotal_avg);
-
- /* Check that bottom VBLANK is at least 2 lines tall when running with
- * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
- * of lines in a frame - 1'.
- */
- REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
- &vblank_start);
- ASSERT(vtotal_min >= vblank_start + 1);
-
- /* Special case where the average frame rate can be achieved
- * without using the DTO
- */
- if (vtotal_min == vtotal_max) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
-
- ratio = vtotal_max - vtotal_avg;
- modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
- phase = ratio * modulo;
-
- /* Special cases where the DTO phase gets rounded to 0 or
- * to DTO modulo
- */
- if (phase <= 0 || phase >= modulo) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
- phase <= 0 ?
- (uint32_t)vtotal_max : (uint32_t)vtotal_min);
- REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
- REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
- REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
- OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
- OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
- OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
- optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
-}
-
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
@@ -415,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg(
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
@@ -706,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
index cab864095ce7..e3b6ad6a8784 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -29,9 +29,6 @@
#include "core_types.h"
#include "dcn20/dcn20_optc.h"
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg);
-
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index cee1b351e105..8d24763938ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters(
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
- long HTotal,
- long VBlank,
- long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int HTotal,
+ unsigned int VBlank,
+ unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup,
@@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
@@ -3265,8 +3266,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
- double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
+ double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
{
double TotalRepeaterDelayTime = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
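
Note: besides the pre-pass that picks the highest dispclk/dppclk independent of voltage level, this hunk swaps the min(max(...)) idiom for the kernel's clamp() from include/linux/minmax.h, which bounds a value to [lo, hi] and is type-checked. A standalone sketch of the equivalence, using a local macro as a stand-in for the kernel one:

	#include <stdio.h>

	/* stand-in for clamp() from include/linux/minmax.h */
	#define my_clamp(val, lo, hi) \
		((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

	int main(void)
	{
		int vlevel_req = 0, vlevel_max = 3;

		/* old form: min(max(vlevel_req, 2), vlevel_max) */
		int old_way = (vlevel_req > 2 ? vlevel_req : 2);
		old_way = old_way < vlevel_max ? old_way : vlevel_max;

		int new_way = my_clamp(vlevel_req, 2, vlevel_max);

		printf("%d %d\n", old_way, new_way); /* prints: 2 2 */
		return 0;
	}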
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 17a21bcbde17..1a28061bb9ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
+ dc_assert_fp_enabled();
+
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
@@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
+ dc_assert_fp_enabled();
+
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index d2ae43a82ba5..dfcc5d50071e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate(
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index f567a9023682..ed59c77bc6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
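Note: the added clamp reflects that DST_Y_PREFETCH is programmed into a U6.2 fixed-point register field, i.e. 6 integer bits plus 2 fractional bits, whose largest representable value is 2^6 - 2^-2 = 63.75. As a quick check in quarter-line units, 63.75 * 4 = 255, the maximum raw value of the 8-bit field; clamping before the dml_floor() quarter-line rounding keeps dst_y_prefetch_equ inside that range.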
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
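Note: switching the bitwise & to the logical && matches the intent of the condition. && short-circuits and treats any non-zero operand as true, whereas & combines the raw integer values bit by bit and only gives the same answer here because each operand already evaluates to 0 or 1. A minimal self-contained illustration of the difference:

	int a = 2, b = 1;
	int logical = a && b;	/* 1: both operands are non-zero */
	int bitwise = a & b;	/* 0: 0b10 and 0b01 share no set bits */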
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 21f637ae4add..df9d50b9b57c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -409,6 +409,9 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
}
+ if (dc->debug.force_odm_combine_4to1)
+ context->bw_ctx.dml.ip.odm_combine_4to1_supported = true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
index d32c5bb99f4c..362ac79184ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -35,6 +35,6 @@
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 5865e8fa2d8e..9f3938a50240 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6f490d8d7038..8a0f128722b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -31,7 +31,7 @@
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
@@ -626,6 +626,8 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
+ !pipe->stream->hw_cursor_req &&
+ !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
@@ -1477,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
@@ -1515,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
@@ -1558,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc_state_remove_phantom_streams_and_planes(dc, context);
dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
@@ -2136,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
bool repopulate_pipes = false;
@@ -2160,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
for (i = 0; i < context->stream_count; i++)
resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -2170,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
&pipe_cnt, &repopulate_pipes))
goto validate_fail;
}
- if (fast_validate ||
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
@@ -2193,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
@@ -2245,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int flag_vlevel = vlevel;
int i;
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!dc->config.enable_windowed_mpo_odm)
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
@@ -2341,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2387,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
@@ -2408,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
stream_status->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
}
}
}
@@ -3223,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3395,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
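Note: the (uint64_t) cast forces the v_total * h_total product into 64-bit arithmetic. Without it, the two 32-bit timing fields are multiplied in 32 bits first and can wrap for large enough timings before being combined with the 64-bit pix_clk term. A reduced sketch of the failure mode, with exaggerated values chosen only to show the wraparound:

	uint32_t v_total = 80000, h_total = 60000;
	uint64_t wrapped = v_total * h_total;             /* 32-bit multiply wraps to 505032704 */
	uint64_t correct = (uint64_t)v_total * h_total;   /* 64-bit multiply: 4800000000 */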
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 276e90e4e0ce..273d2bd79d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 86ac7d59fd32..0748ef36a16a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,6 +1595,7 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
+ unsigned int NonDSCBPP3 = BPP_INVALID;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1603,6 +1604,7 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
+ NonDSCBPP3 = 18;
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
@@ -1667,6 +1669,8 @@ double dml32_TruncToValidBPP(
return NonDSCBPP1;
else if (MaxLinkBPP >= NonDSCBPP0)
return 16.0;
+ else if ((Output == dm_dp2p0 || Output == dm_dp) && NonDSCBPP3 != BPP_INVALID && MaxLinkBPP >= NonDSCBPP3)
+ return NonDSCBPP3; // Special case to allow 6bpc RGB for DP connections.
else
return BPP_INVALID;
}
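Note: the new NonDSCBPP3 = 18 entry is 6 bits per component RGB, i.e. 3 components x 6 bits = 18 bpp, the same mapping that gives 24, 30 and 36 bpp for the existing 8, 10 and 12 bpc dm_444 entries. It is only returned as a last resort for DP/DP2.0 outputs whose MaxLinkBPP cannot carry the 24 bpp minimum of the other uncompressed formats.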
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..817a370e80a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -31,7 +31,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -159,7 +159,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
+ .dscclk_mhz = 400.0,
.dtbclk_mhz = 600.0,
},
},
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -367,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
@@ -435,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -443,8 +445,10 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -496,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
@@ -579,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
unsigned int i, plane_count = 0;
DC_LOGGER_INIT(dc->ctx->logger);
+ dc_assert_fp_enabled();
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index 067480fc3691..d121c5afce71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index d9e63c4fdd95..77023b619f1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -10,7 +10,7 @@
#include "dml/dcn35/dcn35_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -401,6 +401,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
@@ -469,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -477,8 +478,10 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -530,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
index f93efab9a668..f71d9d8d0759 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
deleted file mode 100644
index 4fbecb5ff349..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dcn401_fpu.h"
-#include "dcn401/dcn401_resource.h"
-// We need this includes for WATERMARKS_* defines
-#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
-#include "link.h"
-
-#define DC_LOGGER_INIT(logger)
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
-{
- /* defaults */
- double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
- double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
- double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
- double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- uint16_t setb_min_uclk_mhz = min_uclk_mhz;
- uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
-
- dc_assert_fp_enabled();
-
- /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
- if (dcfclk_mhz_for_the_second_state)
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
- else
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
- setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
- clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
- clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
- clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
- clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
- }
- /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
- /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
-}
-
-/*
- * dcn401_update_bw_bounding_box
- *
- * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
- * spreadsheet with actual values as per dGPU SKU:
- * - with passed few options from dc->config
- * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
- * need to get it from PM FW)
- * - with passed latency values (passed in ns units) in dc-> bb override for
- * debugging purposes
- * - with passed latencies from VBIOS (in 100_ns units) if available for
- * certain dGPU SKU
- * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
- * of the same ASIC)
- * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
- * FW for different clocks (which might differ for certain dGPU SKU of the
- * same ASIC)
- */
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
-{
- dc_assert_fp_enabled();
-
- /* Override from passed dc->bb_overrides if available*/
- if (dc->bb_overrides.sr_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.urgent_latency_ns)
- dc->dml2_options.bbox_overrides.urgent_latency_us =
- dc->bb_overrides.urgent_latency_ns / 1000.0;
-
- if (dc->bb_overrides.dram_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (dc->bb_overrides.fclk_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
-
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
-
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dc->dml2_options.bbox_overrides.dram_num_chan =
- dc->ctx->dc_bios->vram_info.num_chans;
-
- }
-
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
- dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
- dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
-
- if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
- unsigned int i = 0;
-
- dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- }
- }
- }
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
deleted file mode 100644
index 329f1788843c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DCN401_FPU_H__
-#define __DCN401_FPU_H__
-
-#include "clk_mgr.h"
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);
-
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 412e75eb4704..12ff65b6a7e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -122,17 +122,6 @@ void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const stru
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
-{
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
- dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
- dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
-}
-
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index ebcd717744e5..2bc64c4081dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -35,7 +35,6 @@ void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dp
void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing);
void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param);
void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param);
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param);
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param);
void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 072bd0539605..6b2ab4ec2b5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
static inline double dml_ceil(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(a, granularity);
}
static inline double dml_floor(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(a, granularity);
}
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
static inline double dml_ceil_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(x, granularity);
}
static inline double dml_floor_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(x, granularity);
}
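Note: dml_ceil()/dml_floor() round a value up or down to a multiple of granularity, so a zero granularity would otherwise push a division by zero into dcn_bw_ceil2()/dcn_bw_floor2(); the added guards return 0 early instead. A minimal sketch of the rounding these wrappers are assumed to delegate to (the real helpers live elsewhere in the DML code; truncation stands in for floor() for non-negative inputs):

	static double floor_to_granularity(double a, double granularity)
	{
		if (granularity == 0)		/* mirrors the new guard */
			return 0;
		return (double)(long long)(a / granularity) * granularity;
	}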
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
deleted file mode 100644
index d9c27ebe12ee..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ /dev/null
@@ -1,135 +0,0 @@
-# SPDX-License-Identifier: MIT */
-#
-# Copyright 2023 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dml2.
-
-dml2_ccflags := $(CC_FLAGS_FPU)
-dml2_rcflags := $(CC_FLAGS_NO_FPU)
-
-ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
-endif
-
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
-
-DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
- dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
- dml_display_rq_dlg_calc.o
-
-AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
-
-DML21 := src/dml2_top/dml2_top_interfaces.o
-DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/inc/dml2_debug.o
-DML21 += src/dml2_core/dml2_core_dcn4.o
-DML21 += src/dml2_core/dml2_core_factory.o
-DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
-DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
-DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
-DML21 += src/dml2_mcg/dml2_mcg_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
-DML21 += src/dml2_standalone_libraries/lib_float_math.o
-DML21 += dml21_translation_helper.o
-DML21 += dml21_wrapper.o
-DML21 += dml21_utils.o
-
-AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
deleted file mode 100644
index b2075b8c363b..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#ifndef _DML21_WRAPPER_H_
-#define _DML21_WRAPPER_H_
-
-#include "os_types.h"
-#include "dml_top_soc_parameter_types.h"
-
-struct dc;
-struct dc_state;
-struct dml2_configuration_options;
-struct dml2_context;
-
-/**
- * dml2_create - Creates dml21_context.
- * @in_dc: dc.
- * @dml2: Created dml21 context.
- * @config: dml21 configuration options.
- *
- * Create of DML21 is done as part of dc_state creation.
- * DML21 IP, SOC and STATES are initialized at
- * creation time.
- *
- * Return: True if dml2 is successfully created, false otherwise.
- */
-bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
-void dml21_destroy(struct dml2_context *dml2);
-void dml21_copy(struct dml2_context *dst_dml_ctx,
- struct dml2_context *src_dml_ctx);
-bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
- struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
-
-/**
- * dml21_validate - Determines if a display configuration is supported or not.
- * @in_dc: dc.
- * @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
- *
- * Based on fast_validate option internally would call:
- *
- * -dml21_mode_check_and_programming - for non fast_validate option
- * Calculates if dc_state can be supported on the input display
- * configuration. If supported, generates the necessary HW
- * programming for the new dc_state.
- *
- * -dml21_check_mode_support - for fast_validate option
- * Calculates if dc_state can be supported for the input display
- * config.
-
- * Context: Two threads may not invoke this function concurrently unless they reference
- * separate dc_states for validation.
- * Return: True if mode is supported, false otherwise.
- */
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);
-
-/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
-void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
-
-/* Structure for inputting external SOCBB and DCNIP values for tool based debugging. */
-struct socbb_ip_params_external {
- struct dml2_ip_capabilities ip_params;
- struct dml2_soc_bb soc_bb;
-};
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
deleted file mode 100644
index d82c681a5402..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML_DML_DCN3_SOC_BB__
-#define __DML_DML_DCN3_SOC_BB__
-
-#include "dml_top_soc_parameter_types.h"
-
-static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 12,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 257,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- .minimum_uclk_khz = 97,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 61,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 118,
- },
- {
- .minimum_uclk_khz = 435,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 90,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 135,
- },
- {
- .minimum_uclk_khz = 731,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 124,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 155,
- },
- {
- .minimum_uclk_khz = 1187,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 160,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 177,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
-};
-
-static const struct dml2_soc_bb dml2_socbb_dcn31 = {
- .clk_table = {
- .uclk = {
- .clk_values_khz = {97000, 435000, 731000, 1187000},
- .num_clk_values = 4,
- },
- .fclk = {
- .clk_values_khz = {300000, 2500000},
- .num_clk_values = 2,
- },
- .dcfclk = {
- .clk_values_khz = {200000, 1800000},
- .num_clk_values = 2,
- },
- .dispclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dppclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dtbclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .phyclk = {
- .clk_values_khz = {810000, 810000},
- .num_clk_values = 2,
- },
- .socclk = {
- .clk_values_khz = {300000, 1600000},
- .num_clk_values = 2,
- },
- .dscclk = {
- .clk_values_khz = {666667, 666667},
- .num_clk_values = 2,
- },
- .phyclk_d18 = {
- .clk_values_khz = {625000, 625000},
- .num_clk_values = 2,
- },
- .phyclk_d32 = {
- .clk_values_khz = {2000000, 2000000},
- .num_clk_values = 2,
- },
- .dram_config = {
- .channel_width_bytes = 2,
- .channel_count = 16,
- .transactions_per_clock = 16,
- },
- },
-
- .qos_parameters = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 0,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 260,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- // State 1
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 72,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 117,
- },
- {
- // State 2
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 146,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 133,
- },
- {
- // State 3
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 564,
- .trip_to_memory_uclk_cycles = 853,
- .meta_trip_to_memory_uclk_cycles = 853,
- .maximum_latency_when_urgent_uclk_cycles = 164,
- .average_latency_when_urgent_uclk_cycles = 164,
- .maximum_latency_when_non_urgent_uclk_cycles = 853,
- .average_latency_when_non_urgent_uclk_cycles = 136,
- },
- {
- // State 4
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 613,
- .trip_to_memory_uclk_cycles = 869,
- .meta_trip_to_memory_uclk_cycles = 869,
- .maximum_latency_when_urgent_uclk_cycles = 213,
- .average_latency_when_urgent_uclk_cycles = 213,
- .maximum_latency_when_non_urgent_uclk_cycles = 869,
- .average_latency_when_non_urgent_uclk_cycles = 149,
- },
- {
- // State 5
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 232,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 153,
- },
- {
- // State 6
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 665,
- .trip_to_memory_uclk_cycles = 885,
- .meta_trip_to_memory_uclk_cycles = 885,
- .maximum_latency_when_urgent_uclk_cycles = 265,
- .average_latency_when_urgent_uclk_cycles = 265,
- .maximum_latency_when_non_urgent_uclk_cycles = 885,
- .average_latency_when_non_urgent_uclk_cycles = 161,
- },
- {
- // State 7
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 689,
- .trip_to_memory_uclk_cycles = 895,
- .meta_trip_to_memory_uclk_cycles = 895,
- .maximum_latency_when_urgent_uclk_cycles = 289,
- .average_latency_when_urgent_uclk_cycles = 289,
- .maximum_latency_when_non_urgent_uclk_cycles = 895,
- .average_latency_when_non_urgent_uclk_cycles = 167,
- },
- {
- // State 8
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 316,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 174,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
- },
-
- .power_management_parameters = {
- .dram_clk_change_blackout_us = 400,
- .fclk_change_blackout_us = 0,
- .g7_ppt_blackout_us = 0,
- .stutter_enter_plus_exit_latency_us = 50,
- .stutter_exit_latency_us = 43,
- .z8_stutter_enter_plus_exit_latency_us = 0,
- .z8_stutter_exit_latency_us = 0,
- },
-
- .vmin_limit = {
- .dispclk_khz = 600 * 1000,
- },
-
- .dprefclk_mhz = 700,
- .xtalclk_mhz = 100,
- .pcie_refclk_mhz = 100,
- .dchub_refclk_mhz = 50,
- .mall_allocated_for_dcn_mbytes = 64,
- .max_outstanding_reqs = 512,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .return_bus_width_bytes = 64,
- .hostvm_min_page_size_kbytes = 0,
- .gpuvm_min_page_size_kbytes = 256,
- .phy_downspread_percent = 0,
- .dcn_downspread_percent = 0,
- .dispclk_dppclk_vco_speed_mhz = 4500,
- .do_urgent_latency_adjustment = 0,
- .mem_word_bytes = 32,
- .num_dcc_mcaches = 8,
- .mcache_size_bytes = 2048,
- .mcache_line_size_bytes = 32,
- .max_fclk_for_uclk_dpm_khz = 1250 * 1000,
-};
-
-static const struct dml2_ip_capabilities dml2_dcn31_max_ip_caps = {
- .pipe_count = 4,
- .otg_count = 4,
- .num_dsc = 4,
- .max_num_dp2p0_streams = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1152,
- .meta_fifo_size_in_kentries = 22,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .subvp_drr_scheduling_margin_us = 100,
- .subvp_prefetch_end_to_mall_start_us = 15,
- .subvp_fw_processing_delay = 15,
-
- .fams2 = {
- .max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
- .allow_programming_delay_us = 18,
- .min_allow_width_us = 20,
- .subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
- .subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
- },
-};
-
-#endif /* __DML_DML_DCN3_SOC_BB__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
deleted file mode 100644
index 8f3c1c0b1cc1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ /dev/null
@@ -1,12413 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#include "dml2_internal_shared_types.h"
-#include "dml2_core_shared.h"
-#include "dml2_debug.h"
-#include "lib_float_math.h"
-
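-/*
- * Note: *remainder below is not a true modulo result; it is set to 1 when the
- * quotient has a fractional part and to 0 otherwise.
- */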
-double dml2_core_shared_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
-{
- *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
- return dividend / divisor;
-}
-
-/*
- * START OF STATIC HELPERS
- * These static methods are baseline implementations from DCN4. These should NEVER
- * be modified when developing new DCNs. New DCN code should replace the static helpers
- * using the function pointer pattern.
- */
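-/*
- * Illustrative sketch only, not part of the original file: one shape the
- * function pointer pattern mentioned above can take. The ops-struct name and
- * field set are assumptions for illustration; dml2_core_shared_mode_support()
- * is the real DCN4 baseline helper defined later in this file. A newer DCN
- * would copy such a table and point entries at its own implementations.
- */
-struct dml2_core_shared_example_ops {
- bool (*mode_support)(struct dml2_core_calcs_mode_support_ex *in_out_params);
-};
-
-static const struct dml2_core_shared_example_ops dml2_core_shared_dcn4_baseline_ops = {
- .mode_support = dml2_core_shared_mode_support,
-};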
-
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
-static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
-static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, bool up);
-static unsigned int dml_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info);
-static void dml_calc_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
-static bool dml_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
-static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx);
-static void CalculateMaxDETAndMinCompressedBufferSize(unsigned int ConfigReturnBufferSizeInKByte,
- unsigned int ConfigReturnBufferSegmentSizeInKByte,
- unsigned int ROBBufferSizeInKByte,
- unsigned int MaxNumDPP,
- unsigned int nomDETInKByteOverrideEnable, // VBA_DELTA, allow DV to override default DET size
- unsigned int nomDETInKByteOverrideValue, // VBA_DELTA
- bool is_mrq_present,
-
- // Output
- unsigned int *MaxTotalDETInKByte,
- unsigned int *nomDETInKByte,
- unsigned int *MinCompressedBufferSizeInKByte);
-static void PixelClockAdjustmentForProgressiveToInterlaceUnit(const struct dml2_display_cfg *display_cfg, bool ptoi_supported, double *PixelClockBackEnd);
-static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
-static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan);
-static unsigned int dml_get_gfx_version(enum dml2_swizzle_mode sw_mode);
-static void CalculateBytePerPixelAndBlockSizes(enum dml2_source_format_class SourcePixelFormat,
- enum dml2_swizzle_mode SurfaceTiling,
- unsigned int pitch_y,
- unsigned int pitch_c,
-
- // Output
- unsigned int *BytePerPixelY,
- unsigned int *BytePerPixelC,
- double *BytePerPixelDETY,
- double *BytePerPixelDETC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC,
- unsigned int *MacroTileHeightY,
- unsigned int *MacroTileHeightC,
- unsigned int *MacroTileWidthY,
- unsigned int *MacroTileWidthC,
- bool *surf_linear128_l,
- bool *surf_linear128_c);
-static void CalculateSinglePipeDPPCLKAndSCLThroughput(
- double HRatio,
- double HRatioChroma,
- double VRatio,
- double VRatioChroma,
- double MaxDCHUBToPSCLThroughput,
- double MaxPSCLToLBThroughput,
- double PixelClock,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int HTaps,
- unsigned int HTapsChroma,
- unsigned int VTaps,
- unsigned int VTapsChroma,
-
- // Output
- double *PSCL_THROUGHPUT,
- double *PSCL_THROUGHPUT_CHROMA,
- double *DPPCLKUsingSingleDPP);
-static void CalculateSwathWidth(
- const struct dml2_display_cfg *display_cfg,
- bool ForceSingleDPP,
- unsigned int NumberOfActiveSurfaces,
- enum dml2_odm_mode ODMMode[],
- unsigned int BytePerPixY[],
- unsigned int BytePerPixC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- bool surf_linear128_l[],
- bool surf_linear128_c[],
- unsigned int DPPPerSurface[],
-
- // Output
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
- unsigned int SwathWidthSingleDPPC[],
- unsigned int SwathWidthY[], // per-pipe
- unsigned int SwathWidthC[], // per-pipe
- unsigned int MaximumSwathHeightY[],
- unsigned int MaximumSwathHeightC[],
- unsigned int swath_width_luma_ub[], // per-pipe
- unsigned int swath_width_chroma_ub[]); // per-pipe
-static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsigned int TotalNumberOfActiveDPP, bool NoChromaOrLinear);
-static void CalculateDETBufferSize(struct dml2_core_shared_calculate_det_buffer_size_params *p);
-static double CalculateRequiredDispclk(enum dml2_odm_mode ODMMode, double PixelClock);
-static double TruncToValidBPP(
- struct dml2_core_shared_TruncToValidBPP_locals *l,
- double LinkBitRate,
- unsigned int Lanes,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClock,
- double DesiredBPP,
- bool DSCEnable,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class Format,
- unsigned int DSCInputBitPerComponent,
- unsigned int DSCSlices,
- unsigned int AudioRate,
- unsigned int AudioLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
-
- // Output
- unsigned int *RequiredSlots);
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double BPP,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum dml2_output_format_class pixelFormat,
- enum dml2_output_encoder_class Output);
-static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output);
-static unsigned int CalculateHostVMDynamicLevels(bool GPUVMEnable, bool HostVMEnable, unsigned int HostVMMinPageSize, unsigned int HostVMMaxNonCachedPageTableLevels);
-static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_and_row_bytes_params *p);
-static unsigned int CalculatePrefetchSourceLines(
- double VRatio,
- unsigned int VTaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- enum dml2_rotation_angle RotationAngle,
- bool mirrored,
- bool ViewportStationary,
- unsigned int SwathWidth,
- unsigned int ViewportHeight,
- unsigned int ViewportXStart,
- unsigned int ViewportYStart,
-
- // Output
- unsigned int *VInitPreFill,
- unsigned int *MaxNumSwath);
-static void CalculateRowBandwidth(
- bool GPUVMEnable,
- bool use_one_row_for_frame,
- enum dml2_source_format_class SourcePixelFormat,
- double VRatio,
- double VRatioChroma,
- bool DCCEnable,
- double LineTime,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
-
- bool mrq_present,
- unsigned int meta_row_bytes_per_row_ub_l,
- unsigned int meta_row_bytes_per_row_ub_c,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
-
- // Output
- double *dpte_row_bw,
- double *meta_row_bw);
-static void CalculateMALLUseForStaticScreen(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int SurfaceSizeInMALL[],
- bool one_row_per_frame_fits_in_buffer[],
-
- // Output
- bool is_using_mall_for_ss[]);
-static void CalculateDCCConfiguration(
- bool DCCEnabled,
- bool DCCProgrammingAssumesScanDirectionUnknown,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int SurfaceWidthLuma,
- unsigned int SurfaceWidthChroma,
- unsigned int SurfaceHeightLuma,
- unsigned int SurfaceHeightChroma,
- unsigned int nomDETInKByte,
- unsigned int RequestHeight256ByteLuma,
- unsigned int RequestHeight256ByteChroma,
- enum dml2_swizzle_mode TilingFormat,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- double BytePerPixelDETY,
- double BytePerPixelDETC,
- enum dml2_rotation_angle RotationAngle,
-
- // Output
- enum dml2_core_internal_request_type *RequestLuma,
- enum dml2_core_internal_request_type *RequestChroma,
- unsigned int *MaxUncompressedBlockLuma,
- unsigned int *MaxUncompressedBlockChroma,
- unsigned int *MaxCompressedBlockLuma,
- unsigned int *MaxCompressedBlockChroma,
- unsigned int *IndependentBlockLuma,
- unsigned int *IndependentBlockChroma);
-static void calculate_mcache_row_bytes(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_mcache_row_bytes_params *p);
-static void calculate_mcache_setting(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_mcache_setting_params *p);
-static void calculate_mall_bw_overhead_factor(
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes);
-static double dml_get_return_bandwidth_available(
- const struct dml2_soc_bb *soc,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool is_avg_bw,
- bool is_hvm_en,
- bool is_hvm_only,
- double dcflk_mhz,
- double fclk_mhz,
- double dram_bw_mbps);
-static void calculate_bandwidth_available(
- double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
- double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_max],
- double urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_max],
-
- const struct dml2_soc_bb *soc,
- bool HostVMEnable,
- double dcfclk_mhz,
- double fclk_mhz,
- double dram_bw_mbps);
-static void calculate_avg_bandwidth_required(
- double avg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes,
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double cursor_bw[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double mall_prefetch_dram_overhead_factor[],
- double mall_prefetch_sdp_overhead_factor[]);
-static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateVMRowAndSwath_params *p);
-static double CalculateUrgentLatency(
- double UrgentLatencyPixelDataOnly,
- double UrgentLatencyPixelMixedWithVMData,
- double UrgentLatencyVMDataOnly,
- bool DoUrgentLatencyAdjustment,
- double UrgentLatencyAdjustmentFabricClockComponent,
- double UrgentLatencyAdjustmentFabricClockReference,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int urgent_ramp_uclk_cycles,
- unsigned int df_qos_response_time_fclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_urgent_ramp_latency_margin,
- double fabric_max_transport_latency_margin);
-static double CalculateTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int trip_to_memory_uclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin);
-static double CalculateMetaTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int meta_trip_to_memory_uclk_cycles,
- unsigned int meta_trip_to_memory_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin);
-static void calculate_cursor_req_attributes(
- unsigned int cursor_width,
- unsigned int cursor_bpp,
-
- // output
- unsigned int *cursor_lines_per_chunk,
- unsigned int *cursor_bytes_per_line,
- unsigned int *cursor_bytes_per_chunk,
- unsigned int *cursor_bytes);
-static void calculate_cursor_urgent_burst_factor(
- unsigned int CursorBufferSize,
- unsigned int CursorWidth,
- unsigned int cursor_bytes_per_chunk,
- unsigned int cursor_lines_per_chunk,
- double LineTime,
- double UrgentLatency,
-
- double *UrgentBurstFactorCursor,
- bool *NotEnoughUrgentLatencyHiding);
-static void CalculateUrgentBurstFactor(
- const struct dml2_plane_parameters *plane_cfg,
- unsigned int swath_width_luma_ub,
- unsigned int swath_width_chroma_ub,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double LineTime,
- double UrgentLatency,
- double VRatio,
- double VRatioC,
- double BytePerPixelInDETY,
- double BytePerPixelInDETC,
- unsigned int DETBufferSizeY,
- unsigned int DETBufferSizeC,
- // Output
- double *UrgentBurstFactorLuma,
- double *UrgentBurstFactorChroma,
- bool *NotEnoughUrgentLatencyHiding);
-static void CalculateDCFCLKDeepSleep(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelY[],
- unsigned int BytePerPixelC[],
- unsigned int SwathWidthY[],
- unsigned int SwathWidthC[],
- unsigned int DPPPerSurface[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- unsigned int ReturnBusWidth,
-
- // Output
- double *DCFClkDeepSleep);
-static double CalculateWriteBackDelay(
- enum dml2_source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackVTaps,
- unsigned int WritebackDestinationWidth,
- unsigned int WritebackDestinationHeight,
- unsigned int WritebackSourceHeight,
- unsigned int HTotal);
-static unsigned int CalculateMaxVStartup(
- bool ptoi_supported,
- unsigned int vblank_nom_default_us,
- const struct dml2_timing_cfg *timing,
- double write_back_delay_us);
-static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *p);
-static void CalculateODMMode(
- unsigned int MaximumPixelsPerLinePerDSCUnit,
- unsigned int HActive,
- enum dml2_output_encoder_class Output,
- enum dml2_odm_mode ODMUse,
- double MaxDispclk,
- bool DSCEnable,
- unsigned int TotalNumberOfActiveDPP,
- unsigned int MaxNumDPP,
- double PixelClock,
-
- // Output
- bool *TotalAvailablePipesSupport,
- unsigned int *NumberOfDPP,
- enum dml2_odm_mode *ODMMode,
- double *RequiredDISPCLKPerSurface);
-static void CalculateOutputLink(
- struct dml2_core_internal_scratch *s,
- double PHYCLK,
- double PHYCLKD18,
- double PHYCLKD32,
- double Downspreading,
- bool IsMainSurfaceUsingTheIndicatedTiming,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class OutputFormat,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClockBackEnd,
- double ForcedOutputLinkBPP,
- unsigned int DSCInputBitPerComponent,
- unsigned int NumberOfDSCSlices,
- double AudioSampleRate,
- unsigned int AudioSampleLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
- enum dml2_dsc_enable_option DSCEnable,
- unsigned int OutputLinkDPLanes,
- enum dml2_output_link_dp_rate OutputLinkDPRate,
-
- // Output
- bool *RequiresDSC,
- bool *RequiresFEC,
- double *OutBpp,
- enum dml2_core_internal_output_type *OutputType,
- enum dml2_core_internal_output_type_rate *OutputRate,
- unsigned int *RequiredSlots);
-static double CalculateWriteBackDISPCLK(
- enum dml2_source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackHTaps,
- unsigned int WritebackVTaps,
- unsigned int WritebackSourceWidth,
- unsigned int WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackLineBufferSize);
-static double RequiredDTBCLK(
- bool DSCEnable,
- double PixelClock,
- enum dml2_output_format_class OutputFormat,
- double OutputBpp,
- unsigned int DSCSlices,
- unsigned int HTotal,
- unsigned int HActive,
- unsigned int AudioRate,
- unsigned int AudioLayout);
-static unsigned int DSCDelayRequirement(
- bool DSCEnabled,
- enum dml2_odm_mode ODMMode,
- unsigned int DSCInputBitPerComponent,
- double OutputBpp,
- unsigned int HActive,
- unsigned int HTotal,
- unsigned int NumberOfDSCSlices,
- enum dml2_output_format_class OutputFormat,
- enum dml2_output_encoder_class Output,
- double PixelClock,
- double PixelClockBackEnd);
-static void CalculateSurfaceSizeInMall(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int BytesPerPixelY[],
- unsigned int BytesPerPixelC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int ReadBlockWidthY[],
- unsigned int ReadBlockWidthC[],
- unsigned int ReadBlockHeightY[],
- unsigned int ReadBlockHeightC[],
-
- // Output
- unsigned int SurfaceSizeInMALL[],
- bool *ExceededMALLSize);
-static void calculate_tdlut_setting(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_tdlut_setting_params *p);
-static void CalculateTarb(
- const struct dml2_display_cfg *display_cfg,
- unsigned int PixelChunkSizeInKByte,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- double ReturnBW,
-
- unsigned int MetaChunkSize,
-
- // output
- double *Tarb,
- double *Tarb_prefetch);
-static double CalculateTWait(long reserved_vblank_time_ns, double UrgentLatency, double Ttrip);
-static void CalculateVUpdateAndDynamicMetadataParameters(
- unsigned int MaxInterDCNTileRepeaters,
- double Dppclk,
- double Dispclk,
- double DCFClkDeepSleep,
- double PixelClock,
- unsigned int HTotal,
- unsigned int VBlank,
- unsigned int DynamicMetadataTransmittedBytes,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
-
- // Output
- double *TSetup,
- double *Tdmbf,
- double *Tdmec,
- double *Tdmsks,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix);
-static double get_urgent_bandwidth_required(
- struct dml2_core_shared_get_urgent_bandwidth_required_locals *l,
- const struct dml2_display_cfg *display_cfg,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool inc_flip_bw, // including flip bw
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[]);
-static void CalculateExtraLatency(
- const struct dml2_display_cfg *display_cfg,
- unsigned int ROBBufferSizeInKByte,
- unsigned int RoundTripPingLatencyCycles,
- unsigned int ReorderingBytes,
- double DCFCLK,
- double FabricClock,
- unsigned int PixelChunkSizeInKByte,
- double ReturnBW,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
- unsigned int max_outstanding_requests,
- unsigned int request_size_bytes_luma[],
- unsigned int request_size_bytes_chroma[],
- unsigned int MetaChunkSize,
- unsigned int dchub_arb_to_ret_delay,
- double Ttrip,
- unsigned int hostvm_mode,
-
- // output
- double *ExtraLatency, // Tex
- double *ExtraLatency_sr, // Tex_sr
- double *ExtraLatencyPrefetch);
-static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculatePrefetchSchedule_params *p);
-static void calculate_peak_bandwidth_required(
- struct dml2_core_internal_scratch *s,
-
- // output
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int inc_flip_bw,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[]);
-static void check_urgent_bandwidth_support(
- double *frac_urg_bandwidth_nom,
- double *frac_urg_bandwidth_mall,
- bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
-
- unsigned int mall_allocated_for_dcn_mbytes,
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static double get_bandwidth_available_for_immediate_flip(
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max], // no flip
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static void calculate_immediate_flip_bandwidth_support(
- // Output
- double *frac_urg_bandwidth_flip,
- bool *flip_bandwidth_support_ok,
-
- // Input
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static void CalculateFlipSchedule(
- struct dml2_core_internal_scratch *s,
- bool iflip_enable,
- bool use_lb_flip_bw,
- double HostVMInefficiencyFactor,
- double Tvm_trips_flip,
- double Tr0_trips_flip,
- double Tvm_trips_flip_rounded,
- double Tr0_trips_flip_rounded,
- bool GPUVMEnable,
- double vm_bytes, // vm_bytes
- double DPTEBytesPerRow, // dpte_row_bytes
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum dml2_source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw_flip,
- unsigned int dpte_row_height,
- unsigned int dpte_row_height_chroma,
- bool use_one_row_for_frame_flip,
- unsigned int max_flip_time_us,
- unsigned int per_pipe_flip_bytes,
- unsigned int meta_row_bytes,
- unsigned int meta_row_height,
- unsigned int meta_row_height_chroma,
- bool dcc_mrq_enable,
-
- // Output
- double *dst_y_per_vm_flip,
- double *dst_y_per_row_flip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
-static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *p);
-static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config);
-static double dram_bw_kbps_to_uclk_mhz(unsigned long long bw_kbps, const struct dml2_dram_params *dram_config);
-static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
-static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
-static unsigned int get_pipe_flip_bytes(
- double hostvm_inefficiency_factor,
- unsigned int vm_bytes,
- unsigned int dpte_row_bytes,
- unsigned int meta_row_bytes);
-static void calculate_hostvm_inefficiency_factor(
- double *HostVMInefficiencyFactor,
- double *HostVMInefficiencyFactorPrefetch,
-
- bool gpuvm_enable,
- bool hostvm_enable,
- unsigned int remote_iommu_outstanding_translations,
- unsigned int max_outstanding_reqs,
- double urg_bandwidth_avail_active_pixel_and_vm,
- double urg_bandwidth_avail_active_vm_only);
-static void CalculatePixelDeliveryTimes(
- const struct dml2_display_cfg *display_cfg,
- const struct core_display_cfg_support_info *cfg_support_info,
- unsigned int NumberOfActiveSurfaces,
- double VRatioPrefetchY[],
- double VRatioPrefetchC[],
- unsigned int swath_width_luma_ub[],
- unsigned int swath_width_chroma_ub[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- unsigned int BytePerPixelC[],
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
-
- // Output
- double DisplayPipeLineDeliveryTimeLuma[],
- double DisplayPipeLineDeliveryTimeChroma[],
- double DisplayPipeLineDeliveryTimeLumaPrefetch[],
- double DisplayPipeLineDeliveryTimeChromaPrefetch[],
- double DisplayPipeRequestDeliveryTimeLuma[],
- double DisplayPipeRequestDeliveryTimeChroma[],
- double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
- double DisplayPipeRequestDeliveryTimeChromaPrefetch[]);
-static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTETimes_params *p);
-static void CalculateVMGroupAndRequestTimes(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelC[],
- double dst_y_per_vm_vblank[],
- double dst_y_per_vm_flip[],
- unsigned int dpte_row_width_luma_ub[],
- unsigned int dpte_row_width_chroma_ub[],
- unsigned int vm_group_bytes[],
- unsigned int dpde0_bytes_per_frame_ub_l[],
- unsigned int dpde0_bytes_per_frame_ub_c[],
- unsigned int tdlut_pte_bytes_per_frame[],
- unsigned int meta_pte_bytes_per_frame_ub_l[],
- unsigned int meta_pte_bytes_per_frame_ub_c[],
- bool mrq_present,
-
- // Output
- double TimePerVMGroupVBlank[],
- double TimePerVMGroupFlip[],
- double TimePerVMRequestVBlank[],
- double TimePerVMRequestFlip[]);
-static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateStutterEfficiency_params *p);
-static bool dml_is_dual_plane(enum dml2_source_format_class source_format);
-static unsigned int dml_get_plane_idx(const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx);
-static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *wm_regs);
-static unsigned int log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend);
-static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- unsigned int pipe_idx);
-static void rq_dlg_get_dlg_reg(struct dml2_core_internal_scratch *s,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- const unsigned int pipe_idx);
-static void rq_dlg_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param);
-
-/*
- * END OF STATIC HELPERS
- */
-
-bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
- const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
- const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-
- struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
- struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- unsigned int k, m, n;
-
- memset(&mode_lib->ms, 0, sizeof(struct dml2_core_internal_mode_support));
-
- mode_lib->ms.num_active_planes = display_cfg->num_planes;
- get_stream_output_bpp(s->OutputBpp, display_cfg);
-
- mode_lib->ms.state_idx = in_out_params->min_clk_index;
- mode_lib->ms.SOCCLK = ((double)mode_lib->soc.clk_table.socclk.clk_values_khz[0] / 1000);
- mode_lib->ms.DCFCLK = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_dcfclk_khz / 1000);
- mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
- mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
- mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
- mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
- mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
- mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
- mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-
- // dml2_printf_dml_policy(&mode_lib->ms.policy);
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, mode_lib->ms.num_active_planes);
-#endif
-
- CalculateMaxDETAndMinCompressedBufferSize(
- mode_lib->ip.config_return_buffer_size_in_kbytes,
- mode_lib->ip.config_return_buffer_segment_size_in_kbytes,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->ip.max_num_dpp,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.enable,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.value,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- &mode_lib->ms.MaxTotalDETInKByte,
- &mode_lib->ms.NomDETInKByte,
- &mode_lib->ms.MinCompressedBufferSizeInKByte);
-
- PixelClockAdjustmentForProgressiveToInterlaceUnit(display_cfg, mode_lib->ip.ptoi_supported, s->PixelClockBackEnd);
-
- /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
-
- /*Scale Ratio, taps Support Check*/
- mode_lib->ms.support.ScaleRatioAndTapsSupport = true;
- // Many core tests are still setting scaling parameters "incorrectly"
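- // A plane fails this check when the scaler is disabled but the format or the
- // plane0 ratios/taps still require scaling, or when taps fall outside [1, 8],
- // an h_taps value above 1 is odd, a ratio exceeds the IP max scale ratio, or
- // a ratio exceeds the corresponding tap count; 4:2:0 formats apply the same
- // limits to plane1.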
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.enabled == false
- && (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps != 1.0)) {
- mode_lib->ms.support.ScaleRatioAndTapsSupport = false;
- } else if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps < 1.0 || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps > 8.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps < 1.0 || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps > 8.0
- || (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps > 1.0 && (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps % 2) == 1)
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio > mode_lib->ip.max_hscl_ratio
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio > mode_lib->ip.max_vscl_ratio
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps
- || (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)
- && (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps < 1 || display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps > 8 ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps < 1 || display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps > 8 ||
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps > 1 && display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps % 2 == 1) ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio > mode_lib->ip.max_hscl_ratio ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio > mode_lib->ip.max_vscl_ratio ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps))) {
- mode_lib->ms.support.ScaleRatioAndTapsSupport = false;
- }
- }
-
- /*Source Format, Pixel Format and Scan Support Check*/
- mode_lib->ms.support.SourceFormatPixelAndScanSupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear && dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- mode_lib->ms.support.SourceFormatPixelAndScanSupport = false;
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateBytePerPixelAndBlockSizes(
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.tiling,
- display_cfg->plane_descriptors[k].surface.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.pitch,
-
- /* Output */
- &mode_lib->ms.BytePerPixelY[k],
- &mode_lib->ms.BytePerPixelC[k],
- &mode_lib->ms.BytePerPixelInDETY[k],
- &mode_lib->ms.BytePerPixelInDETC[k],
- &mode_lib->ms.Read256BlockHeightY[k],
- &mode_lib->ms.Read256BlockHeightC[k],
- &mode_lib->ms.Read256BlockWidthY[k],
- &mode_lib->ms.Read256BlockWidthC[k],
- &mode_lib->ms.MacroTileHeightY[k],
- &mode_lib->ms.MacroTileHeightC[k],
- &mode_lib->ms.MacroTileWidthY[k],
- &mode_lib->ms.MacroTileWidthC[k],
- &mode_lib->ms.surf_linear128_l[k],
- &mode_lib->ms.surf_linear128_c[k]);
- }
-
- /*Bandwidth Support Check*/
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- mode_lib->ms.SwathWidthYSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- mode_lib->ms.SwathWidthCSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- } else {
- mode_lib->ms.SwathWidthYSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- mode_lib->ms.SwathWidthCSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.SurfaceReadBandwidthLuma[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->ms.SurfaceReadBandwidthChroma[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
-
- mode_lib->ms.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width *
- display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
-
-#ifdef __DML_VBA_DEBUG__
- double old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthChroma[k]);
-#endif
- }
-
- // Writeback bandwidth
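- // Output pixels divided by the time needed to scan the writeback input
- // height, multiplied by bytes per pixel (8 for dml2_444_64, otherwise 4).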
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8.0;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4.0;
- } else {
- mode_lib->ms.WriteBandwidth[k] = 0.0;
- }
- }
-
- /*Writeback Latency support check*/
- mode_lib->ms.support.WritebackLatencySupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
- mode_lib->ms.support.WritebackLatencySupport = false;
- }
- }
-
- /* Writeback Mode Support Check */
- s->TotalNumberOfActiveWriteback = 0;
- for (k = 0; k <= (unsigned int)mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true
- && (display_cfg->plane_descriptors[k].stream_index == k)) {
- s->TotalNumberOfActiveWriteback = s->TotalNumberOfActiveWriteback + 1;
- }
- }
-
- mode_lib->ms.support.EnoughWritebackUnits = true;
- if (s->TotalNumberOfActiveWriteback > (unsigned int)mode_lib->ip.max_num_wb) {
- mode_lib->ms.support.EnoughWritebackUnits = false;
- }
-
- /* Writeback Scale Ratio and Taps Support Check */
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > mode_lib->ip.writeback_max_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > mode_lib->ip.writeback_max_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio < mode_lib->ip.writeback_min_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio < mode_lib->ip.writeback_min_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps
- || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps % 2) == 1))) {
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
- }
- if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
- }
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateSinglePipeDPPCLKAndSCLThroughput(
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ip.max_dchub_pscl_bw_pix_per_clk,
- mode_lib->ip.max_pscl_lb_bw_pix_per_clk,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps,
- /* Output */
- &mode_lib->ms.PSCL_FACTOR[k],
- &mode_lib->ms.PSCL_FACTOR_CHROMA[k],
- &mode_lib->ms.MinDPPCLKUsingSingleDPP[k]);
- }
-
- // Max Viewport Size support
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
- s->MaximumSwathWidthSupportLuma = 15360;
- } else if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelC[k] > 0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) { // horz video
- s->MaximumSwathWidthSupportLuma = 7680 + 16;
- } else if (dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelC[k] > 0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) { // vert video
- s->MaximumSwathWidthSupportLuma = 4320 + 16;
- } else if (display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) { // rgbe + alpha
- s->MaximumSwathWidthSupportLuma = 5120 + 16;
- } else if (dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelY[k] == 8 && display_cfg->plane_descriptors[k].surface.dcc.enable == true) { // vert 64bpp
- s->MaximumSwathWidthSupportLuma = 3072 + 16;
- } else {
- s->MaximumSwathWidthSupportLuma = 6144 + 16;
- }
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
- s->MaximumSwathWidthSupportChroma = (unsigned int)(s->MaximumSwathWidthSupportLuma / 2.0);
- } else {
- s->MaximumSwathWidthSupportChroma = s->MaximumSwathWidthSupportLuma;
- }
- mode_lib->ms.MaximumSwathWidthInLineBufferLuma = mode_lib->ip.line_buffer_size_bits * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, 1.0) - 2, 0.0));
- if (mode_lib->ms.BytePerPixelC[k] == 0.0) {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = 0;
- } else {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = mode_lib->ip.line_buffer_size_bits * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, 1.0) - 2, 0.0));
- }
- mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- }
-
- /* Cursor Support Check */
- mode_lib->ms.support.CursorSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_width > 0.0) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) {
- mode_lib->ms.support.CursorSupport = false;
- }
- }
- }
-
- /* Valid Pitch Check */
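-	// Plane pitches must be aligned to the macro-tile width (half of it for linear-128 surfaces); with MRQ/DCC present, meta pitches must align to 64x the 256-byte block width.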
- mode_lib->ms.support.PitchSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- // data pitch
- unsigned int alignment_l = mode_lib->ms.MacroTileWidthY[k];
-
- if (mode_lib->ms.surf_linear128_l[k])
- alignment_l = alignment_l / 2;
-
- mode_lib->ms.support.AlignedYPitch[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.plane0.pitch, display_cfg->plane_descriptors[k].surface.plane0.width), alignment_l);
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- unsigned int alignment_c = mode_lib->ms.MacroTileWidthC[k];
-
- if (mode_lib->ms.surf_linear128_c[k])
- alignment_c = alignment_c / 2;
- mode_lib->ms.support.AlignedCPitch[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.plane1.pitch, display_cfg->plane_descriptors[k].surface.plane1.width), alignment_c);
- } else {
- mode_lib->ms.support.AlignedCPitch[k] = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- }
-
- if (mode_lib->ms.support.AlignedYPitch[k] > display_cfg->plane_descriptors[k].surface.plane0.pitch ||
- mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
- mode_lib->ms.support.PitchSupport = false;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
-#endif
- }
-
- // meta pitch
- if (mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable) {
- mode_lib->ms.support.AlignedDCCMetaPitchY[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane0.width), 64.0 * mode_lib->ms.Read256BlockWidthY[k]);
-
- if (mode_lib->ms.support.AlignedDCCMetaPitchY[k] > display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch)
- mode_lib->ms.support.PitchSupport = false;
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- mode_lib->ms.support.AlignedDCCMetaPitchC[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.width), 64.0 * mode_lib->ms.Read256BlockWidthC[k]);
-
- if (mode_lib->ms.support.AlignedDCCMetaPitchC[k] > display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch)
- mode_lib->ms.support.PitchSupport = false;
- }
- } else {
- mode_lib->ms.support.AlignedDCCMetaPitchY[k] = 0;
- mode_lib->ms.support.AlignedDCCMetaPitchC[k] = 0;
- }
- }
-
- mode_lib->ms.support.ViewportExceedsSurface = false;
- if (!display_cfg->overrides.hw.surface_viewport_size_check_disable) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
-#endif
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
- }
- }
- }
- }
- }
-
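-	// First swath/DET pass: force a single DPP per plane to find out whether the full viewport fits in the DET with one pipe; most outputs go to scratch, only the per-surface viewport-size support flag is kept.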
- CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSizeInKByte = mode_lib->ip.config_return_buffer_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->MaxTotalDETInKByte = mode_lib->ms.MaxTotalDETInKByte;
- CalculateSwathAndDETConfiguration_params->MinCompressedBufferSizeInKByte = mode_lib->ms.MinCompressedBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateSwathAndDETConfiguration_params->pixel_chunk_size_kbytes = mode_lib->ip.pixel_chunk_size_kbytes;
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = 1;
- CalculateSwathAndDETConfiguration_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateSwathAndDETConfiguration_params->nomDETInKByte = mode_lib->ms.NomDETInKByte;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.SurfaceReadBandwidthChroma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = mode_lib->ms.MaximumSwathWidthLuma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = mode_lib->ms.MaximumSwathWidthChroma;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->ms.Read256BlockHeightY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightC = mode_lib->ms.Read256BlockHeightC;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthY = mode_lib->ms.Read256BlockWidthY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthC = mode_lib->ms.Read256BlockWidthC;
- CalculateSwathAndDETConfiguration_params->surf_linear128_l = mode_lib->ms.surf_linear128_l;
- CalculateSwathAndDETConfiguration_params->surf_linear128_c = mode_lib->ms.surf_linear128_c;
- CalculateSwathAndDETConfiguration_params->ODMMode = s->dummy_odm_mode;
- CalculateSwathAndDETConfiguration_params->BytePerPixY = mode_lib->ms.BytePerPixelY;
- CalculateSwathAndDETConfiguration_params->BytePerPixC = mode_lib->ms.BytePerPixelC;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = s->dummy_integer_array[2];
- CalculateSwathAndDETConfiguration_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = s->dummy_integer_array[0];
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = s->dummy_integer_array[1];
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = s->dummy_integer_array[3];
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = s->dummy_integer_array[4];
- CalculateSwathAndDETConfiguration_params->SwathWidth = s->dummy_integer_array[5];
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = s->dummy_integer_array[6];
- CalculateSwathAndDETConfiguration_params->SwathHeightY = s->dummy_integer_array[7];
- CalculateSwathAndDETConfiguration_params->SwathHeightC = s->dummy_integer_array[8];
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = s->dummy_integer_array[26];
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = s->dummy_integer_array[27];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = s->dummy_integer_array[9];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = s->dummy_integer_array[10];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = s->dummy_integer_array[11];
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &s->dummy_boolean[0];
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = &s->dummy_integer[1];
- CalculateSwathAndDETConfiguration_params->hw_debug5 = &s->dummy_boolean[2];
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &s->dummy_integer[0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = mode_lib->ms.SingleDPPViewportSizeSupportPerSurface;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &s->dummy_boolean[1];
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
-	// This call is just to find out whether there is enough DET space to support the full viewport in one pipe.
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- {
- mode_lib->ms.TotalNumberOfActiveDPP = 0;
- mode_lib->ms.support.TotalAvailablePipesSupport = true;
-
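-		// Evaluate ODM mode, DPP count and required DISPCLK twice per plane (without and with DSC); which result is used depends on whether CalculateOutputLink decides DSC is required.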
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- CalculateODMMode(
- mode_lib->ip.maximum_pixels_per_line_per_dsc_unit,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode,
- mode_lib->ms.max_dispclk_freq_mhz,
- false, // DSCEnable
- mode_lib->ms.TotalNumberOfActiveDPP,
- mode_lib->ip.max_num_dpp,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
-
- /* Output */
- &s->TotalAvailablePipesSupportNoDSC,
- &s->NumberOfDPPNoDSC,
- &s->ODMModeNoDSC,
- &s->RequiredDISPCLKPerSurfaceNoDSC);
-
- CalculateODMMode(
- mode_lib->ip.maximum_pixels_per_line_per_dsc_unit,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode,
- mode_lib->ms.max_dispclk_freq_mhz,
- true, // DSCEnable
- mode_lib->ms.TotalNumberOfActiveDPP,
- mode_lib->ip.max_num_dpp,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
-
- /* Output */
- &s->TotalAvailablePipesSupportDSC,
- &s->NumberOfDPPDSC,
- &s->ODMModeDSC,
- &s->RequiredDISPCLKPerSurfaceDSC);
-
- /*Number Of DSC Slices*/
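-			// Slice count scales with the back-end pixel clock: 1 up to 340 MHz, 2 up to 1200, 4 up to 2400, 8 up to 4800, above that ceil(clk/600) rounded to a multiple of 4; ODM combine then rounds the count to a multiple of the segment count (12 for 3to1).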
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (s->PixelClockBackEnd[k] > 4800) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = (unsigned int)(math_ceil2(s->PixelClockBackEnd[k] / 600, 4));
- } else if (s->PixelClockBackEnd[k] > 2400) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 8;
- } else if (s->PixelClockBackEnd[k] > 1200) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 4;
- } else if (s->PixelClockBackEnd[k] > 340) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 2;
- } else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 1;
- }
- } else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 0;
- }
-
- if (s->ODMModeDSC == dml2_odm_mode_combine_2to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 2 * (unsigned int)math_ceil2(mode_lib->ms.support.NumberOfDSCSlices[k] / 2.0, 1.0);
- else if (s->ODMModeDSC == dml2_odm_mode_combine_3to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 12;
- else if (s->ODMModeDSC == dml2_odm_mode_combine_4to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 4 * (unsigned int)math_ceil2(mode_lib->ms.support.NumberOfDSCSlices[k] / 4.0, 1.0);
-
- CalculateOutputLink(
- &mode_lib->scratch,
- ((double)mode_lib->soc.clk_table.phyclk.clk_values_khz[0] / 1000),
- ((double)mode_lib->soc.clk_table.phyclk_d18.clk_values_khz[0] / 1000),
- ((double)mode_lib->soc.clk_table.phyclk_d32.clk_values_khz[0] / 1000),
- mode_lib->soc.phy_downspread_percent,
- (display_cfg->plane_descriptors[k].stream_index == k),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- s->PixelClockBackEnd[k],
- s->OutputBpp[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout,
- s->ODMModeNoDSC,
- s->ODMModeDSC,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate,
-
- /* Output */
- &mode_lib->ms.RequiresDSC[k],
- &mode_lib->ms.RequiresFEC[k],
- &mode_lib->ms.OutputBpp[k],
-				&mode_lib->ms.OutputType[k], // VBA_DELTA: VBA uses a string to represent type and rate, but DML uses an enum; don't want to rely on strings
- &mode_lib->ms.OutputRate[k],
- &mode_lib->ms.RequiredSlots[k]);
-
- if (mode_lib->ms.RequiresDSC[k] == false) {
- mode_lib->ms.ODMMode[k] = s->ODMModeNoDSC;
- mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceNoDSC;
- if (!s->TotalAvailablePipesSupportNoDSC)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
- mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPNoDSC;
- } else {
- mode_lib->ms.ODMMode[k] = s->ODMModeDSC;
- mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceDSC;
- if (!s->TotalAvailablePipesSupportDSC)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
- mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
- }
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
- }
-
- // FIXME_DCN4 - add odm vs mpc use check
-
- // FIXME_DCN4 - add imall cap check
- mode_lib->ms.support.incorrect_imall_usage = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall)
- mode_lib->ms.support.incorrect_imall_usage = 1;
- }
-
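-		// Determine DPPs per plane: ODM combine fixes 2/3/4 DPPs with MPC combine off; otherwise honor the mpcc_combine_factor override, or fall back to MPC combine when one DPP cannot meet the DPPCLK limit or the viewport width.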
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 1;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 4;
- } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 3;
- } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 2;
- } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
- mode_lib->ms.MPCCombine[k] = true;
- mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
- } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 1;
- if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
-					dml2_printf("ERROR: DML::%s: MPCC combine is overridden to disabled, but the viewport is too large to be supported with a single pipe!\n", __func__);
- }
- } else {
- if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- mode_lib->ms.MPCCombine[k] = true;
- mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
- }
- }
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
-#endif
- }
-
- if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
-
-
- mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
- for (k = 0; k < (unsigned int)mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoOfDPP[k] == 1)
- mode_lib->ms.TotalNumberOfSingleDPPSurfaces = mode_lib->ms.TotalNumberOfSingleDPPSurfaces + 1;
- }
-
- //DISPCLK/DPPCLK
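-		// Required DISPCLK is the max of the writeback requirement and every surface's requirement; GlobalDPPCLK is the max over planes of the single-DPP DPPCLK divided by the DPP count.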
- mode_lib->ms.WritebackRequiredDISPCLK = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- mode_lib->ms.WritebackRequiredDISPCLK = math_max2(mode_lib->ms.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- mode_lib->ip.writeback_line_buffer_buffer_size));
- }
- }
-
- mode_lib->ms.RequiredDISPCLK = mode_lib->ms.WritebackRequiredDISPCLK;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.RequiredDISPCLK = math_max2(mode_lib->ms.RequiredDISPCLK, mode_lib->ms.RequiredDISPCLKPerSurface[k]);
- }
-
- mode_lib->ms.GlobalDPPCLK = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.RequiredDPPCLK[k] = mode_lib->ms.MinDPPCLKUsingSingleDPP[k] / mode_lib->ms.NoOfDPP[k];
- mode_lib->ms.GlobalDPPCLK = math_max2(mode_lib->ms.GlobalDPPCLK, mode_lib->ms.RequiredDPPCLK[k]);
- }
-
- mode_lib->ms.support.DISPCLK_DPPCLK_Support = !((mode_lib->ms.RequiredDISPCLK > mode_lib->ms.max_dispclk_freq_mhz) || (mode_lib->ms.GlobalDPPCLK > mode_lib->ms.max_dppclk_freq_mhz));
- }
-
- /* Total Available OTG, HDMIFRL, DP Support Check */
- s->TotalNumberOfActiveOTG = 0;
- s->TotalNumberOfActiveHDMIFRL = 0;
- s->TotalNumberOfActiveDP2p0 = 0;
- s->TotalNumberOfActiveDP2p0Outputs = 0;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- s->TotalNumberOfActiveOTG = s->TotalNumberOfActiveOTG + 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)
- s->TotalNumberOfActiveHDMIFRL = s->TotalNumberOfActiveHDMIFRL + 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0) {
- s->TotalNumberOfActiveDP2p0 = s->TotalNumberOfActiveDP2p0 + 1;
- // FIXME_STAGE2: SW not using backend related stuff, need mapping for mst setup
- //if (display_cfg->output.OutputMultistreamId[k] == k || display_cfg->output.OutputMultistreamEn[k] == false) {
- s->TotalNumberOfActiveDP2p0Outputs = s->TotalNumberOfActiveDP2p0Outputs + 1;
- //}
- }
- }
- }
-
- mode_lib->ms.support.NumberOfOTGSupport = (s->TotalNumberOfActiveOTG <= (unsigned int)mode_lib->ip.max_num_otg);
- mode_lib->ms.support.NumberOfHDMIFRLSupport = (s->TotalNumberOfActiveHDMIFRL <= (unsigned int)mode_lib->ip.max_num_hdmi_frl_outputs);
- mode_lib->ms.support.NumberOfDP2p0Support = (s->TotalNumberOfActiveDP2p0 <= (unsigned int)mode_lib->ip.max_num_dp2p0_streams && s->TotalNumberOfActiveDP2p0Outputs <= (unsigned int)mode_lib->ip.max_num_dp2p0_outputs);
-
- mode_lib->ms.support.ExceededMultistreamSlots = false;
- mode_lib->ms.support.LinkCapacitySupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_disabled == false &&
- display_cfg->plane_descriptors[k].stream_index == k && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) && mode_lib->ms.OutputBpp[k] == 0) {
- mode_lib->ms.support.LinkCapacitySupport = false;
- }
- }
-
- mode_lib->ms.support.P2IWith420 = false;
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = false;
- mode_lib->ms.support.DSC422NativeNotSupported = false;
- mode_lib->ms.support.LinkRateDoesNotMatchDPVersion = false;
- mode_lib->ms.support.LinkRateForMultistreamNotIndicated = false;
- mode_lib->ms.support.BPPForMultistreamNotIndicated = false;
- mode_lib->ms.support.MultistreamWithHDMIOreDP = false;
- mode_lib->ms.support.MSOOrODMSplitWithNonDPLink = false;
- mode_lib->ms.support.NotEnoughLanesForMSO = false;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && mode_lib->ip.ptoi_supported == true)
- mode_lib->ms.support.P2IWith420 = true;
-
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary && s->OutputBpp[k] != 0)
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = true;
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 && !mode_lib->ip.dsc422_native_support)
- mode_lib->ms.support.DSC422NativeNotSupported = true;
-
- if (((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr2 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr3) &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_edp) ||
- ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr10 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr13p5 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr20) &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp2p0))
- mode_lib->ms.support.LinkRateDoesNotMatchDPVersion = true;
-
- // FIXME_STAGE2
- //if (display_cfg->output.OutputMultistreamEn[k] == 1) {
- // if (display_cfg->output.OutputMultistreamId[k] == k && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_na)
- // mode_lib->ms.support.LinkRateForMultistreamNotIndicated = true;
- // if (display_cfg->output.OutputMultistreamId[k] == k && s->OutputBpp[k] == 0)
- // mode_lib->ms.support.BPPForMultistreamNotIndicated = true;
- // for (n = 0; n < mode_lib->ms.num_active_planes; ++n) {
- // if (display_cfg->output.OutputMultistreamId[k] == n && s->OutputBpp[k] == 0)
- // mode_lib->ms.support.BPPForMultistreamNotIndicated = true;
- // }
- //}
-
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)) {
- // FIXME_STAGE2
- //if (display_cfg->output.OutputMultistreamEn[k] == 1 && display_cfg->output.OutputMultistreamId[k] == k)
- // mode_lib->ms.support.MultistreamWithHDMIOreDP = true;
- //for (n = 0; n < mode_lib->ms.num_active_planes; ++n) {
- // if (display_cfg->output.OutputMultistreamEn[k] == 1 && display_cfg->output.OutputMultistreamId[k] == n)
- // mode_lib->ms.support.MultistreamWithHDMIOreDP = true;
- //}
- }
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_split_1to2 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to2 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to4))
- mode_lib->ms.support.MSOOrODMSplitWithNonDPLink = true;
-
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to2 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count < 2) ||
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to4 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count < 4))
- mode_lib->ms.support.NotEnoughLanesForMSO = true;
- }
- }
-
- mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl &&
- RequiredDTBCLK(
- mode_lib->ms.RequiresDSC[k],
- s->PixelClockBackEnd[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- mode_lib->ms.OutputBpp[k],
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout) > ((double)mode_lib->soc.clk_table.dtbclk.clk_values_khz[0] / 1000)) {
- mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
- }
- }
-
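-	// Per-stream DSCCLK requirement: back-end pixel clock divided by 3 per DSC engine (ODM combine scales the divisor to 6/9/12) and by the format factor (2 for 4:2:0, native 4:2:2 and HDMI FRL).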
- mode_lib->ms.support.DSCCLKRequiredMoreThanSupported = false;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420) {
- s->DSCFormatFactor = 2;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_444) {
- s->DSCFormatFactor = 1;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) {
- s->DSCFormatFactor = 2;
- } else {
- s->DSCFormatFactor = 1;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
-#endif
- if (mode_lib->ms.RequiresDSC[k] == true) {
- s->PixelClockBackEndFactor = 3.0;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->PixelClockBackEndFactor = 12.0;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->PixelClockBackEndFactor = 9.0;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->PixelClockBackEndFactor = 6.0;
-
- mode_lib->ms.required_dscclk_freq_mhz[k] = s->PixelClockBackEnd[k] / s->PixelClockBackEndFactor / (double)s->DSCFormatFactor;
- if (mode_lib->ms.required_dscclk_freq_mhz[k] > mode_lib->ms.max_dscclk_freq_mhz) {
- mode_lib->ms.support.DSCCLKRequiredMoreThanSupported = true;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
-#endif
- }
- }
- }
- }
-
- /* Check DSC Unit and Slices Support */
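-	// Each DSC engine handles at most 4 slices and maximum_pixels_per_line_per_dsc_unit pixels per line; ODM combine requires one engine per segment, and the total must not exceed the number of DSC units in the IP.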
- mode_lib->ms.support.NotEnoughDSCSlices = false;
- s->TotalDSCUnitsRequired = 0;
- mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.RequiresDSC[k] == true) {
- s->NumDSCUnitRequired = 1;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->NumDSCUnitRequired = 4;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->NumDSCUnitRequired = 3;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->NumDSCUnitRequired = 2;
-
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active > s->NumDSCUnitRequired * (unsigned int)mode_lib->ip.maximum_pixels_per_line_per_dsc_unit)
- mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport = false;
- s->TotalDSCUnitsRequired = s->TotalDSCUnitsRequired + s->NumDSCUnitRequired;
- if (mode_lib->ms.support.NumberOfDSCSlices[k] > 4 * s->NumDSCUnitRequired)
- mode_lib->ms.support.NotEnoughDSCSlices = true;
- }
- }
-
- mode_lib->ms.support.NotEnoughDSCUnits = false;
- if (s->TotalDSCUnitsRequired > (unsigned int)mode_lib->ip.num_dsc) {
- mode_lib->ms.support.NotEnoughDSCUnits = true;
- }
-
- /*DSC Delay per state*/
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.DSCDelay[k] = DSCDelayRequirement(mode_lib->ms.RequiresDSC[k],
- mode_lib->ms.ODMMode[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.OutputBpp[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- s->PixelClockBackEnd[k]);
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- for (m = 0; m < mode_lib->ms.num_active_planes; m++) {
- if (display_cfg->plane_descriptors[k].stream_index == m && mode_lib->ms.RequiresDSC[m] == true) {
- mode_lib->ms.DSCDelay[k] = mode_lib->ms.DSCDelay[m];
- }
- }
- }
-
-	// Recompute the swath and DET configuration now that the number of DPPs per plane is known
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = false;
- CalculateSwathAndDETConfiguration_params->ODMMode = mode_lib->ms.ODMMode;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = s->dummy_integer_array[0];
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = s->dummy_integer_array[1];
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub;
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub;
- CalculateSwathAndDETConfiguration_params->SwathWidth = mode_lib->ms.SwathWidthY;
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = mode_lib->ms.SwathWidthC;
- CalculateSwathAndDETConfiguration_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateSwathAndDETConfiguration_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = mode_lib->ms.support.request_size_bytes_luma;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = mode_lib->ms.support.request_size_bytes_chroma;
-	CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = mode_lib->ms.DETBufferSizeInKByte; // FIXME: this is per pipe, but all pipes in a plane will use the same value
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &mode_lib->ms.UnboundedRequestEnabled;
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = s->dummy_integer_array[3];
- CalculateSwathAndDETConfiguration_params->hw_debug5 = s->dummy_boolean_array[1];
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = s->dummy_boolean_array[0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &mode_lib->ms.support.ViewportSizeSupport;
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.SurfaceSizeInMALL[k] = 0;
- mode_lib->ms.support.ExceededMALLSize = 0;
- } else {
- CalculateSurfaceSizeInMall(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
-
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.Read256BlockWidthY,
- mode_lib->ms.Read256BlockWidthC,
- mode_lib->ms.Read256BlockHeightY,
- mode_lib->ms.Read256BlockHeightC,
- mode_lib->ms.MacroTileWidthY,
- mode_lib->ms.MacroTileWidthC,
- mode_lib->ms.MacroTileHeightY,
- mode_lib->ms.MacroTileHeightC,
-
- /* Output */
- mode_lib->ms.SurfaceSizeInMALL,
- &mode_lib->ms.support.ExceededMALLSize);
- }
-
- mode_lib->ms.TotalNumberOfDCCActiveDPP = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].surface.dcc.enable == true) {
- mode_lib->ms.TotalNumberOfDCCActiveDPP = mode_lib->ms.TotalNumberOfDCCActiveDPP + mode_lib->ms.NoOfDPP[k];
- }
- }
-
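-	// Pack the per-plane surface, viewport and swath parameters into SurfParameters for the CalculateVMRowAndSwath call below.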
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- s->SurfParameters[k].PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->SurfParameters[k].DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- s->SurfParameters[k].RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- s->SurfParameters[k].ViewportHeight = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- s->SurfParameters[k].ViewportHeightC = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- s->SurfParameters[k].BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- s->SurfParameters[k].BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- s->SurfParameters[k].BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- s->SurfParameters[k].BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- s->SurfParameters[k].BlockWidthY = mode_lib->ms.MacroTileWidthY[k];
- s->SurfParameters[k].BlockHeightY = mode_lib->ms.MacroTileHeightY[k];
- s->SurfParameters[k].BlockWidthC = mode_lib->ms.MacroTileWidthC[k];
- s->SurfParameters[k].BlockHeightC = mode_lib->ms.MacroTileHeightC[k];
- s->SurfParameters[k].InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- s->SurfParameters[k].HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- s->SurfParameters[k].DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- s->SurfParameters[k].SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- s->SurfParameters[k].SurfaceTiling = display_cfg->plane_descriptors[k].surface.tiling;
- s->SurfParameters[k].BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- s->SurfParameters[k].BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- s->SurfParameters[k].ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- s->SurfParameters[k].VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- s->SurfParameters[k].VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- s->SurfParameters[k].VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- s->SurfParameters[k].VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- s->SurfParameters[k].PitchY = display_cfg->plane_descriptors[k].surface.plane0.pitch;
- s->SurfParameters[k].PitchC = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- s->SurfParameters[k].ViewportStationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- s->SurfParameters[k].ViewportXStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- s->SurfParameters[k].ViewportYStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
-		s->SurfParameters[k].ViewportXStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- s->SurfParameters[k].ViewportYStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- s->SurfParameters[k].FORCE_ONE_ROW_FOR_FRAME = display_cfg->plane_descriptors[k].overrides.hw.force_one_row_for_frame;
- s->SurfParameters[k].SwathHeightY = mode_lib->ms.SwathHeightY[k];
- s->SurfParameters[k].SwathHeightC = mode_lib->ms.SwathHeightC[k];
-
- s->SurfParameters[k].DCCMetaPitchY = display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch;
- s->SurfParameters[k].DCCMetaPitchC = display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch;
- }
-
- CalculateVMRowAndSwath_params->display_cfg = display_cfg;
- CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateVMRowAndSwath_params->myPipe = s->SurfParameters;
- CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
- CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
- CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
- CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceeded;
- CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[12];
- CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[13];
- CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height;
- CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[14]; // VBA_DELTA
- CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[15]; // VBA_DELTA
- CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[16];
- CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[17];
- CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[18];
- CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[19];
- CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[20];
- CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[21];
- CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[22];
- CalculateVMRowAndSwath_params->vmpg_width_y = s->vmpg_width_y;
- CalculateVMRowAndSwath_params->vmpg_height_y = s->vmpg_height_y;
- CalculateVMRowAndSwath_params->vmpg_width_c = s->vmpg_width_c;
- CalculateVMRowAndSwath_params->vmpg_height_c = s->vmpg_height_c;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[23];
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[24];
- CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC;
- CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY;
- CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC;
- CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY;
- CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC;
- CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow;
- CalculateVMRowAndSwath_params->vm_bytes = mode_lib->ms.vm_bytes;
- CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame;
- CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip;
- CalculateVMRowAndSwath_params->is_using_mall_for_ss = s->dummy_boolean_array[0];
- CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1];
- CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[25];
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceeded;
- CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- CalculateVMRowAndSwath_params->meta_row_bytes = mode_lib->ms.meta_row_bytes;
- CalculateVMRowAndSwath_params->meta_req_width_luma = s->dummy_integer_array[26];
- CalculateVMRowAndSwath_params->meta_req_height_luma = s->dummy_integer_array[27];
- CalculateVMRowAndSwath_params->meta_row_width_luma = s->dummy_integer_array[28];
- CalculateVMRowAndSwath_params->meta_row_height_luma = s->meta_row_height_luma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = s->dummy_integer_array[29];
- CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[30];
- CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[31];
- CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[32];
- CalculateVMRowAndSwath_params->meta_row_height_chroma = s->meta_row_height_chroma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[33];
-
- CalculateVMRowAndSwath(&mode_lib->scratch, CalculateVMRowAndSwath_params);
-
- mode_lib->ms.support.PTEBufferSizeNotExceeded = true;
- mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.PTEBufferSizeNotExceeded[k] == false)
- mode_lib->ms.support.PTEBufferSizeNotExceeded = false;
-
- if (mode_lib->ms.DCCMetaBufferSizeNotExceeded[k] == false)
- mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
-#endif
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
-#endif
-
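-	// Derive the urgent latency and trip-to-memory time from the SoC QoS parameters at the current FCLK/UCLK; trip-to-memory is clamped to be at least the urgent latency.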
- mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
- mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
- mode_lib->ms.FabricClock,
- mode_lib->ms.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->ms.TripToMemory = CalculateTripToMemory(
- mode_lib->ms.UrgLatency,
- mode_lib->ms.FabricClock,
- mode_lib->ms.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
-
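-	// Per plane: compute cursor request attributes and the urgent burst factors for cursor, luma and chroma, and flag planes whose DET buffering cannot hide the urgent latency.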
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
-
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- bool cursor_not_enough_urgent_latency_hiding = 0;
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
-
- // output
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
- mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
-#endif
-
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
-
- // Output
- &mode_lib->ms.UrgentBurstFactorLuma[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHiding[k]);
-
- mode_lib->ms.NotEnoughUrgentLatencyHiding[k] = mode_lib->ms.NotEnoughUrgentLatencyHiding[k] || cursor_not_enough_urgent_latency_hiding;
- }
-
- CalculateDCFCLKDeepSleep(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->soc.return_bus_width_bytes,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
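-	// Writeback delay per stream is the base writeback latency plus the scaler delay at the required DISPCLK; the maximum over all writeback-enabled planes of a stream is propagated to every plane of that stream.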
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WritebackDelayTime[k] = mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK;
- } else {
- mode_lib->ms.WritebackDelayTime[k] = 0.0;
- }
- for (m = 0; m <= mode_lib->ms.num_active_planes - 1; m++) {
- if (display_cfg->plane_descriptors[m].stream_index == k && display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.enable == true) {
- mode_lib->ms.WritebackDelayTime[k] = math_max2(mode_lib->ms.WritebackDelayTime[k],
- mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK);
- }
- }
- }
- }
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- for (m = 0; m <= mode_lib->ms.num_active_planes - 1; m++) {
- if (display_cfg->plane_descriptors[k].stream_index == m) {
- mode_lib->ms.WritebackDelayTime[k] = mode_lib->ms.WritebackDelayTime[m];
- }
- }
- }
-
- // MaximumVStartup is actually Tvstartup_min in DCN4 programming guide
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- bool isInterlaceTiming = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !mode_lib->ip.ptoi_supported);
- s->MaximumVStartup[k] = CalculateMaxVStartup(
- mode_lib->ip.ptoi_supported,
- mode_lib->ip.vblank_nom_default_us,
- &display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing,
- mode_lib->ms.WritebackDelayTime[k]);
- mode_lib->ms.MaxVStartupLines[k] = (isInterlaceTiming ? (2 * s->MaximumVStartup[k]) : s->MaximumVStartup[k]);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
-#endif
-
- /* Immediate Flip and MALL parameters */
- s->ImmediateFlipRequired = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- s->ImmediateFlipRequired = s->ImmediateFlipRequired || display_cfg->plane_descriptors[k].immediate_flip;
- }
-
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe =
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe ||
- ((display_cfg->hostvm_enable == true || display_cfg->plane_descriptors[k].immediate_flip == true) &&
- (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame || dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])));
- }
-
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen = mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen ||
- ((display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable || display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_auto) && (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))) ||
- ((display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_disable || display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_auto) && (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame));
- }
-
- s->FullFrameMALLPStateMethod = false;
- s->SubViewportMALLPStateMethod = false;
- s->PhantomPipeMALLPStateMethod = false;
- s->SubViewportMALLRefreshGreaterThan120Hz = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- s->FullFrameMALLPStateMethod = true;
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) {
- s->SubViewportMALLPStateMethod = true;
- if (!display_cfg->overrides.enable_subvp_implicit_pmo) {
- // For dv, small frame tests will have very high refresh rate
- unsigned long long refresh_rate = (unsigned long long) ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz * 1000 /
- (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- if (refresh_rate > 120)
- s->SubViewportMALLRefreshGreaterThan120Hz = true;
- }
- }
- if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))
- s->PhantomPipeMALLPStateMethod = true;
- }
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPState = (s->SubViewportMALLPStateMethod != s->PhantomPipeMALLPStateMethod) ||
- (s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
- dml2_printf("DML::%s: urgent latency tolerance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
-#endif
-
- mode_lib->ms.support.OutstandingRequestsSupport = true;
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
-
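- 	// Average latency (us): UCLK cycles at the current DPM converted to time (with UMC average-latency margin), plus fabric transport time, scaled by the fabric transport-latency margin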
- mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
-
- mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
-
- double outstanding_latency_us = 0;
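- 	// DCN4x: the time spanned by the maximum outstanding requests (reqs * request size / return rate) must be at least the average urgent and non-urgent latencies, checked per plane for luma and chroma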
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
- / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsSupport = false;
- }
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_non_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
-#endif
- }
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
- outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
- / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsSupport = false;
- }
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_non_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
-#endif
- }
- }
-
- memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- mode_lib->ms.mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mode_lib->ms.mall_prefetch_dram_overhead_factor[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1[k] = 1.0;
- }
- } else {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- calculate_mcache_setting_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- calculate_mcache_setting_params->num_chans = mode_lib->soc.clk_table.dram_config.channel_count;
- calculate_mcache_setting_params->mem_word_bytes = mode_lib->soc.mem_word_bytes;
- calculate_mcache_setting_params->mcache_size_bytes = mode_lib->soc.mcache_size_bytes;
- calculate_mcache_setting_params->mcache_line_size_bytes = mode_lib->soc.mcache_line_size_bytes;
- calculate_mcache_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_mcache_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- calculate_mcache_setting_params->source_format = display_cfg->plane_descriptors[k].pixel_format;
- calculate_mcache_setting_params->surf_vert = dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle);
- calculate_mcache_setting_params->vp_stationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- calculate_mcache_setting_params->tiling_mode = display_cfg->plane_descriptors[k].surface.tiling;
- calculate_mcache_setting_params->imall_enable = mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall;
-
- calculate_mcache_setting_params->vp_start_x_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- calculate_mcache_setting_params->vp_start_y_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- calculate_mcache_setting_params->full_vp_width_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- calculate_mcache_setting_params->full_vp_height_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- calculate_mcache_setting_params->blk_width_l = mode_lib->ms.MacroTileWidthY[k];
- calculate_mcache_setting_params->blk_height_l = mode_lib->ms.MacroTileHeightY[k];
- calculate_mcache_setting_params->vmpg_width_l = s->vmpg_width_y[k];
- calculate_mcache_setting_params->vmpg_height_l = s->vmpg_height_y[k];
- calculate_mcache_setting_params->full_swath_bytes_l = s->full_swath_bytes_l[k];
- calculate_mcache_setting_params->bytes_per_pixel_l = mode_lib->ms.BytePerPixelY[k];
-
- calculate_mcache_setting_params->vp_start_x_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- calculate_mcache_setting_params->vp_start_y_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- calculate_mcache_setting_params->full_vp_width_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- calculate_mcache_setting_params->full_vp_height_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- calculate_mcache_setting_params->blk_width_c = mode_lib->ms.MacroTileWidthC[k];
- calculate_mcache_setting_params->blk_height_c = mode_lib->ms.MacroTileHeightC[k];
- calculate_mcache_setting_params->vmpg_width_c = s->vmpg_width_c[k];
- calculate_mcache_setting_params->vmpg_height_c = s->vmpg_height_c[k];
- calculate_mcache_setting_params->full_swath_bytes_c = s->full_swath_bytes_c[k];
- calculate_mcache_setting_params->bytes_per_pixel_c = mode_lib->ms.BytePerPixelC[k];
-
- // output
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_l = &mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_l = &mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_c = &mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_c = &mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1[k];
-
- calculate_mcache_setting_params->num_mcaches_l = &mode_lib->ms.num_mcaches_l[k];
- calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->ms.mcache_row_bytes_l[k];
- calculate_mcache_setting_params->mcache_offsets_l = mode_lib->ms.mcache_offsets_l[k];
- calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->ms.mcache_shift_granularity_l[k];
-
- calculate_mcache_setting_params->num_mcaches_c = &mode_lib->ms.num_mcaches_c[k];
- calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->ms.mcache_row_bytes_c[k];
- calculate_mcache_setting_params->mcache_offsets_c = mode_lib->ms.mcache_offsets_c[k];
- calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->ms.mcache_shift_granularity_c[k];
-
- calculate_mcache_setting_params->mall_comb_mcache_l = &mode_lib->ms.mall_comb_mcache_l[k];
- calculate_mcache_setting_params->mall_comb_mcache_c = &mode_lib->ms.mall_comb_mcache_c[k];
- calculate_mcache_setting_params->lc_comb_mcache = &mode_lib->ms.lc_comb_mcache[k];
-
- calculate_mcache_setting(&mode_lib->scratch, calculate_mcache_setting_params);
- }
-
- calculate_mall_bw_overhead_factor(
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- // input
- display_cfg,
- mode_lib->ms.num_active_planes);
- }
-
- // Calculate all the bandwidth available
- 	// Need another bw for latency evaluation
- calculate_bandwidth_available(
- mode_lib->ms.support.avg_bandwidth_available_min, // not used
- mode_lib->ms.support.avg_bandwidth_available, // not used
- mode_lib->ms.support.urg_bandwidth_available_min_latency,
- mode_lib->ms.support.urg_bandwidth_available, // not used
- mode_lib->ms.support.urg_bandwidth_available_vm_only, // not used
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm, // not used
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ms.dram_bw_mbps);
-
- calculate_bandwidth_available(
- mode_lib->ms.support.avg_bandwidth_available_min,
- mode_lib->ms.support.avg_bandwidth_available,
- mode_lib->ms.support.urg_bandwidth_available_min,
- mode_lib->ms.support.urg_bandwidth_available,
- mode_lib->ms.support.urg_bandwidth_available_vm_only,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm,
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->ms.MaxDCFCLK,
- mode_lib->ms.MaxFabricClock,
- mode_lib->ms.dram_bw_mbps);
-
-
- // Average BW support check
- calculate_avg_bandwidth_required(
- mode_lib->ms.support.avg_bandwidth_required,
- // input
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor);
-
- for (m = 0; m < dml2_core_internal_bw_max; m++) { // check sdp and dram
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_sys_idle][m] = 1;
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_sys_active][m] = (mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][m] <= mode_lib->ms.support.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][m]);
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_svp_prefetch][m] = (mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][m] <= mode_lib->ms.support.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][m]);
- }
-
- mode_lib->ms.support.AvgBandwidthSupport = true;
- mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
- mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
-
- }
- }
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) { // check sdp and dram
- if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
- mode_lib->ms.support.AvgBandwidthSupport = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
-#endif
- }
- }
- }
-
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- double min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- {
- mode_lib->ms.support.PrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- }
- }
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- s->AnyLinesForVMOrRowTooLarge = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.LinesForDPTERow[k] >= 16 || mode_lib->ms.LinesForVM[k] >= 32) {
- s->AnyLinesForVMOrRowTooLarge = true;
- }
- }
-
- // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.non_urg_bandwidth_required,
-
- display_cfg,
- 0, // inc_flip_bw
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.RequiredPrefetchPixelDataBWLuma,
- mode_lib->ms.RequiredPrefetchPixelDataBWChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dpte_row_bw,
- mode_lib->ms.meta_row_bw,
- mode_lib->ms.prefetch_cursor_bw,
- mode_lib->ms.prefetch_vmrow_bw,
- mode_lib->ms.final_flip_bw,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
- mode_lib->ms.UrgentBurstFactorLumaPre,
- mode_lib->ms.UrgentBurstFactorChromaPre,
- mode_lib->ms.UrgentBurstFactorCursorPre);
-
- // Check urg peak bandwidth against available urg bw
- 					// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true && mode_lib->ms.support.VRatioInPrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- s->dummy_bw,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
-
- // Input
- display_cfg,
- 1, // inc_flip_bw
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
-
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.RequiredPrefetchPixelDataBWLuma,
- mode_lib->ms.RequiredPrefetchPixelDataBWChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dpte_row_bw,
- mode_lib->ms.meta_row_bw,
- mode_lib->ms.prefetch_cursor_bw,
- mode_lib->ms.prefetch_vmrow_bw,
- mode_lib->ms.final_flip_bw,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
- mode_lib->ms.UrgentBurstFactorLumaPre,
- mode_lib->ms.UrgentBurstFactorChromaPre,
- mode_lib->ms.UrgentBurstFactorCursorPre);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- 				} else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
- } // prefetch schedule
- }
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0; // FIXME_STAGE2: no USR related bbox value
- s->mSOCParameters.SMNLatency = 0; // FIXME_STAGE2
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false /*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement*/;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; // FIXME_STAGE2, need a new ip param?
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
- }
-
- // End of Prefetch Check
-
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
-
- //Re-ordering Buffer Support Check
- mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
-
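- 	// DCN4x ROB check: time the ROB (minus one pixel chunk) can absorb at the required non-urgent SDP flip bandwidth must be at least the max urgent latency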
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
- mode_lib->ms.support.ROBSupport = true;
- } else {
- mode_lib->ms.support.ROBSupport = false;
- }
- } else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
- mode_lib->ms.support.ROBSupport = true;
- } else {
- mode_lib->ms.support.ROBSupport = false;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
-#endif
-
- /*Mode Support, Voltage State and SOC Configuration*/
- {
- // s->dram_clock_change_support = 1;
- // s->f_clock_change_support = 1;
-
- if (mode_lib->ms.support.ScaleRatioAndTapsSupport
- && mode_lib->ms.support.SourceFormatPixelAndScanSupport
- && mode_lib->ms.support.ViewportSizeSupport
- && !mode_lib->ms.support.LinkRateDoesNotMatchDPVersion
- && !mode_lib->ms.support.LinkRateForMultistreamNotIndicated
- && !mode_lib->ms.support.BPPForMultistreamNotIndicated
- && !mode_lib->ms.support.MultistreamWithHDMIOreDP
- && !mode_lib->ms.support.ExceededMultistreamSlots
- && !mode_lib->ms.support.MSOOrODMSplitWithNonDPLink
- && !mode_lib->ms.support.NotEnoughLanesForMSO
- //&& mode_lib->ms.support.LinkCapacitySupport == true // FIXME_STAGE2
- && !mode_lib->ms.support.P2IWith420
- && !mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP
- && !mode_lib->ms.support.DSC422NativeNotSupported
- && !mode_lib->ms.support.NotEnoughDSCUnits
- && !mode_lib->ms.support.NotEnoughDSCSlices
- && !mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe
- && !mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen
- && !mode_lib->ms.support.DSCCLKRequiredMoreThanSupported
- && mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport
- && !mode_lib->ms.support.DTBCLKRequiredMoreThanSupported
- && !mode_lib->ms.support.InvalidCombinationOfMALLUseForPState
- && mode_lib->ms.support.ROBSupport
- && mode_lib->ms.support.OutstandingRequestsSupport
- && mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance
- && mode_lib->ms.support.DISPCLK_DPPCLK_Support
- && mode_lib->ms.support.TotalAvailablePipesSupport
- && mode_lib->ms.support.NumberOfOTGSupport
- && mode_lib->ms.support.NumberOfHDMIFRLSupport
- && mode_lib->ms.support.NumberOfDP2p0Support
- && mode_lib->ms.support.EnoughWritebackUnits
- && mode_lib->ms.support.WritebackLatencySupport
- && mode_lib->ms.support.WritebackScaleRatioAndTapsSupport
- && mode_lib->ms.support.CursorSupport
- && mode_lib->ms.support.PitchSupport
- && !mode_lib->ms.support.ViewportExceedsSurface
- && mode_lib->ms.support.PrefetchSupported
- && mode_lib->ms.support.EnoughUrgentLatencyHidingSupport
- && mode_lib->ms.support.AvgBandwidthSupport
- && mode_lib->ms.support.DynamicMetadataSupported
- && mode_lib->ms.support.VRatioInPrefetchSupported
- && mode_lib->ms.support.PTEBufferSizeNotExceeded
- && mode_lib->ms.support.DCCMetaBufferSizeNotExceeded
- && !mode_lib->ms.support.ExceededMALLSize
- && ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- // && s->dram_clock_change_support == true
- // && s->f_clock_change_support == true
- // && (/*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement || */ mode_lib->ms.support.USRRetrainingSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
- mode_lib->ms.support.ModeSupport = true;
- } else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
- mode_lib->ms.support.ModeSupport = false;
- }
- }
-
- 	// Since mode_support now works on one particular power state, there is only 1 state idx (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
- mode_lib->ms.support.DPPPerSurface[k] = mode_lib->ms.NoOfDPP[k];
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- mode_lib->ms.support.ODMMode[k] = mode_lib->ms.ODMMode[k];
- } else {
- mode_lib->ms.support.ODMMode[k] = dml2_odm_mode_bypass;
- }
-
- mode_lib->ms.support.DSCEnabled[k] = mode_lib->ms.RequiresDSC[k];
- mode_lib->ms.support.FECEnabled[k] = mode_lib->ms.RequiresFEC[k];
- mode_lib->ms.support.OutputBpp[k] = mode_lib->ms.OutputBpp[k];
- mode_lib->ms.support.OutputType[k] = mode_lib->ms.OutputType[k];
- mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
-#endif
- }
-
-#if defined(__DML_VBA_DEBUG__)
- if (!mode_lib->ms.support.ModeSupport)
- dml2_print_dml_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, mode_lib->ms.support.ModeSupport, in_out_params->min_clk_index);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
-#endif
-
- if (mode_lib->ms.support.ModeSupport) {
- *in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- return true;
- } else {
- return false;
- }
-}
-
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
-{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
- if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = 0x%x\n", support->ImmediateFlipSupport);
- if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = 0x%x\n", support->WritebackLatencySupport);
- if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = 0x%x\n", support->ScaleRatioAndTapsSupport);
- if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = 0x%x\n", support->SourceFormatPixelAndScanSupport);
- if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = 0x%x\n", support->P2IWith420);
- if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1)
- dml2_printf("DML: support: DSCOnlyIfNecessaryWithBPP = 0x%x\n", support->DSCOnlyIfNecessaryWithBPP);
- if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = 0x%x\n", support->DSC422NativeNotSupported);
- if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = 0x%x\n", support->LinkRateDoesNotMatchDPVersion);
- if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = 0x%x\n", support->LinkRateForMultistreamNotIndicated);
- if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = 0x%x\n", support->BPPForMultistreamNotIndicated);
- if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = 0x%x\n", support->MultistreamWithHDMIOreDP);
- if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = 0x%x\n", support->MSOOrODMSplitWithNonDPLink);
- if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = 0x%x\n", support->NotEnoughLanesForMSO);
- if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = 0x%x\n", support->NumberOfOTGSupport);
- if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = 0x%x\n", support->NumberOfHDMIFRLSupport);
- if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = 0x%x\n", support->NumberOfDP2p0Support);
- if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = 0x%x\n", support->WritebackScaleRatioAndTapsSupport);
- if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = 0x%x\n", support->CursorSupport);
- if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = 0x%x\n", support->PitchSupport);
- if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = 0x%x\n", support->ViewportExceedsSurface);
- if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = 0x%x\n", support->ExceededMALLSize);
- if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = 0x%x\n", support->EnoughWritebackUnits);
- if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = 0x%x\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = 0x%x\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = 0x%x\n", support->InvalidCombinationOfMALLUseForPState);
- if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = 0x%x\n", support->ExceededMultistreamSlots);
- if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = 0x%x\n", support->NotEnoughDSCUnits);
- if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = 0x%x\n", support->NotEnoughDSCSlices);
- if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = 0x%x\n", support->PixelsPerLinePerDSCUnitSupport);
- if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = 0x%x\n", support->DSCCLKRequiredMoreThanSupported);
- if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = 0x%x\n", support->DTBCLKRequiredMoreThanSupported);
- if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = 0x%x\n", support->LinkCapacitySupport);
- if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
- if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
- if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
- if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
- if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
- if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
- if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
- if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
- if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
- dml2_printf("DML: ===================================== \n");
-}
-
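- // Helper overview (illustrative summary, not from the original source): with DSC disabled the
- // output bpp follows directly from bpc and output format (bpc*3 for 4:4:4, bpc*2 for 4:2:2,
- // bpc*1.5 for 4:2:0); with DSC enabled it is the compressed bpp in 1/16-bpp units divided by 16.
- // For example, 10 bpc 4:4:4 without DSC gives 30 bpp, and dsc_compressed_bpp_x16 = 192 gives 12 bpp.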
-static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
-{
- for (unsigned int k = 0; k < display_cfg->num_planes; k++) {
- double bpc = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.bpc;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_disable) {
- switch (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format) {
- case dml2_444:
- out_bpp[k] = bpc * 3;
- break;
- case dml2_s422:
- out_bpp[k] = bpc * 2;
- break;
- case dml2_n422:
- out_bpp[k] = bpc * 2;
- break;
- case dml2_420:
- default:
- out_bpp[k] = bpc * 1.5;
- break;
- }
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable) {
- out_bpp[k] = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.dsc_compressed_bpp_x16 / 16;
- } else {
- out_bpp[k] = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
- }
-}
-
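- // Rounds num to a multiple of 'multiple', up or down. Illustrative values (added for clarity):
- //   dml_round_to_multiple(100, 64, true)  == 128
- //   dml_round_to_multiple(100, 64, false) == 64
- //   dml_round_to_multiple(128, 64, true)  == 128 (already a multiple)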
-static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, bool up)
-{
- unsigned int remainder;
-
- if (multiple == 0)
- return num;
-
- remainder = num % multiple;
- if (remainder == 0)
- return num;
-
- if (up)
- return (num + multiple - remainder);
- else
- return (num - remainder);
-}
-
-static unsigned int dml_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info)
-{
- unsigned int num_active_pipes = 0;
-
- for (unsigned int k = 0; k < num_planes; k++) {
- num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
- return num_active_pipes;
-}
-
-static void dml_calc_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane)
-{
- unsigned int pipe_idx = 0;
-
- for (unsigned int k = 0; k < DML2_MAX_PLANES; ++k) {
- pipe_plane[k] = __DML2_CALCS_PIPE_NO_PLANE__;
- }
-
- for (unsigned int plane_idx = 0; plane_idx < DML2_MAX_PLANES; plane_idx++) {
- for (int i = 0; i < cfg_support_info->plane_support_info[plane_idx].dpps_used; i++) {
- pipe_plane[pipe_idx] = plane_idx;
- pipe_idx++;
- }
- }
-}
-
-static bool dml_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg)
-{
- bool is_phantom = false;
-
- if (plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe ||
- plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) {
- is_phantom = true;
- }
-
- return is_phantom;
-}
-
-static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx)
-{
- unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
-
- bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
- return is_phantom;
-}
-
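- // Worked example with hypothetical SoC values (assuming math_ceil2/math_floor2 round to the
- // given granularity): ConfigReturnBufferSizeInKByte=1280, ROBBufferSizeInKByte=128, MaxNumDPP=4,
- // ConfigReturnBufferSegmentSizeInKByte=64 with MRQ present gives
- // MaxTotalDETInKByte = ceil((1280+128)*4/5 to 64) = 1152, nomDETInKByte = floor(1152/4 to 64) = 256
- // and MinCompressedBufferSizeInKByte = 1280 - 1152 = 128.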
-static void CalculateMaxDETAndMinCompressedBufferSize(
- unsigned int ConfigReturnBufferSizeInKByte,
- unsigned int ConfigReturnBufferSegmentSizeInKByte,
- unsigned int ROBBufferSizeInKByte,
- unsigned int MaxNumDPP,
- unsigned int nomDETInKByteOverrideEnable, // VBA_DELTA, allow DV to override default DET size
- unsigned int nomDETInKByteOverrideValue, // VBA_DELTA
- bool is_mrq_present,
-
- // Output
- unsigned int *MaxTotalDETInKByte,
- unsigned int *nomDETInKByte,
- unsigned int *MinCompressedBufferSizeInKByte)
-{
- if (is_mrq_present)
- *MaxTotalDETInKByte = (unsigned int)math_ceil2((double)(ConfigReturnBufferSizeInKByte + ROBBufferSizeInKByte) * 4 / 5, 64);
- else
- *MaxTotalDETInKByte = ConfigReturnBufferSizeInKByte - ConfigReturnBufferSegmentSizeInKByte;
-
- *nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
- *MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
-
- if (nomDETInKByteOverrideEnable) {
- *nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overridden)\n", __func__, *nomDETInKByte);
- }
-}
-
-static void PixelClockAdjustmentForProgressiveToInterlaceUnit(const struct dml2_display_cfg *display_cfg, bool ptoi_supported, double *PixelClockBackEnd)
-{
- //unsigned int num_active_planes = display_cfg->num_planes;
-
- //Progressive To Interlace Unit Effect
- for (unsigned int k = 0; k < display_cfg->num_planes; ++k) {
- PixelClockBackEnd[k] = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && ptoi_supported == true) {
- // FIXME_STAGE2... can sw pass the pixel rate for interlaced directly
- //display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz = 2 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz;
- }
- }
-}
-
-bool dml2_core_shared_is_420(enum dml2_source_format_class source_format)
-{
- bool val = false;
-
- switch (source_format) {
- case dml2_444_8:
- val = 0;
- break;
- case dml2_444_16:
- val = 0;
- break;
- case dml2_444_32:
- val = 0;
- break;
- case dml2_444_64:
- val = 0;
- break;
- case dml2_420_8:
- val = 1;
- break;
- case dml2_420_10:
- val = 1;
- break;
- case dml2_420_12:
- val = 1;
- break;
- case dml2_rgbe_alpha:
- val = 0;
- break;
- case dml2_rgbe:
- val = 0;
- break;
- case dml2_mono_8:
- val = 0;
- break;
- case dml2_mono_16:
- val = 0;
- break;
- default:
- DML2_ASSERT(0);
- break;
- }
- return val;
-}
-
-static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
-{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256;
- case (dml2_sw_256b_2d):
- return 256;
- case (dml2_sw_4kb_2d):
- return 4096;
- case (dml2_sw_64kb_2d):
- return 65536;
- case (dml2_sw_256kb_2d):
- return 262144;
- case (dml2_gfx11_sw_linear):
- return 256;
- case (dml2_gfx11_sw_64kb_d):
- return 65536;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144;
- default:
- DML2_ASSERT(0);
- return 256;
- }
-}
-
-const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
-{
- switch (bw_type) {
- case (dml2_core_internal_bw_sdp):
- return("dml2_core_internal_bw_sdp");
- case (dml2_core_internal_bw_dram):
- return("dml2_core_internal_bw_dram");
- case (dml2_core_internal_bw_max):
- return("dml2_core_internal_bw_max");
- default:
- return("dml2_core_internal_bw_unknown");
- }
-}
-
-const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
-{
- switch (dml2_core_internal_soc_state_type) {
- case (dml2_core_internal_soc_state_sys_idle):
- return("dml2_core_internal_soc_state_sys_idle");
- case (dml2_core_internal_soc_state_sys_active):
- return("dml2_core_internal_soc_state_sys_active");
- case (dml2_core_internal_soc_state_svp_prefetch):
- return("dml2_core_internal_soc_state_svp_prefetch");
- case dml2_core_internal_soc_state_max:
- default:
- return("dml2_core_internal_soc_state_unknown");
- }
-}
-
-static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
-{
- bool is_vert = false;
- if (Scan == dml2_rotation_90 || Scan == dml2_rotation_270) {
- is_vert = true;
- } else {
- is_vert = false;
- }
- return is_vert;
-}
-
-static unsigned int dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
-{
- unsigned int version = 0;
-
- if (sw_mode == dml2_sw_linear ||
- sw_mode == dml2_sw_256b_2d ||
- sw_mode == dml2_sw_4kb_2d ||
- sw_mode == dml2_sw_64kb_2d ||
- sw_mode == dml2_sw_256kb_2d) {
- version = 12;
- } else if (sw_mode == dml2_gfx11_sw_linear ||
- sw_mode == dml2_gfx11_sw_64kb_d ||
- sw_mode == dml2_gfx11_sw_64kb_d_t ||
- sw_mode == dml2_gfx11_sw_64kb_d_x ||
- sw_mode == dml2_gfx11_sw_64kb_r_x ||
- sw_mode == dml2_gfx11_sw_256kb_d_x ||
- sw_mode == dml2_gfx11_sw_256kb_r_x) {
- version = 11;
- } else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
- }
-
- return version;
-}
-
-static void CalculateBytePerPixelAndBlockSizes(
- enum dml2_source_format_class SourcePixelFormat,
- enum dml2_swizzle_mode SurfaceTiling,
- unsigned int pitch_y,
- unsigned int pitch_c,
-
- // Output
- unsigned int *BytePerPixelY,
- unsigned int *BytePerPixelC,
- double *BytePerPixelDETY,
- double *BytePerPixelDETC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC,
- unsigned int *MacroTileHeightY,
- unsigned int *MacroTileHeightC,
- unsigned int *MacroTileWidthY,
- unsigned int *MacroTileWidthC,
- bool *surf_linear128_l,
- bool *surf_linear128_c)
-{
- *BytePerPixelDETY = 0;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
-
- if (SourcePixelFormat == dml2_444_64) {
- *BytePerPixelDETY = 8;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 8;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_32 || SourcePixelFormat == dml2_rgbe) {
- *BytePerPixelDETY = 4;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 4;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_16 || SourcePixelFormat == dml2_mono_16) {
- *BytePerPixelDETY = 2;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 2;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_8 || SourcePixelFormat == dml2_mono_8) {
- *BytePerPixelDETY = 1;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 1;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_rgbe_alpha) {
- *BytePerPixelDETY = 4;
- *BytePerPixelDETC = 1;
- *BytePerPixelY = 4;
- *BytePerPixelC = 1;
- } else if (SourcePixelFormat == dml2_420_8) {
- *BytePerPixelDETY = 1;
- *BytePerPixelDETC = 2;
- *BytePerPixelY = 1;
- *BytePerPixelC = 2;
- } else if (SourcePixelFormat == dml2_420_12) {
- *BytePerPixelDETY = 2;
- *BytePerPixelDETC = 4;
- *BytePerPixelY = 2;
- *BytePerPixelC = 4;
- } else if (SourcePixelFormat == dml2_420_10) {
- *BytePerPixelDETY = (double)(4.0 / 3);
- *BytePerPixelDETC = (double)(8.0 / 3);
- *BytePerPixelY = 2;
- *BytePerPixelC = 4;
- } else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
-
- if (dml_get_gfx_version(SurfaceTiling) == 11) {
- *surf_linear128_l = 0;
- *surf_linear128_c = 0;
- } else {
- if (SurfaceTiling == dml2_sw_linear) {
- *surf_linear128_l = (((pitch_y * *BytePerPixelY) % 256) != 0);
-
- if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha)
- *surf_linear128_c = (((pitch_c * *BytePerPixelC) % 256) != 0);
- }
- }
-
- if (!(dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha)) {
- if (SurfaceTiling == dml2_sw_linear) {
- *BlockHeight256BytesY = 1;
- } else if (SourcePixelFormat == dml2_444_64) {
- *BlockHeight256BytesY = 4;
- } else if (SourcePixelFormat == dml2_444_8) {
- *BlockHeight256BytesY = 16;
- } else {
- *BlockHeight256BytesY = 8;
- }
- *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
- *BlockHeight256BytesC = 0;
- *BlockWidth256BytesC = 0;
- } else { // dual plane
- if (SurfaceTiling == dml2_sw_linear) {
- *BlockHeight256BytesY = 1;
- *BlockHeight256BytesC = 1;
- } else if (SourcePixelFormat == dml2_rgbe_alpha) {
- *BlockHeight256BytesY = 8;
- *BlockHeight256BytesC = 16;
- } else if (SourcePixelFormat == dml2_420_8) {
- *BlockHeight256BytesY = 16;
- *BlockHeight256BytesC = 8;
- } else {
- *BlockHeight256BytesY = 8;
- *BlockHeight256BytesC = 8;
- }
- *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
- *BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
-
- if (dml_get_gfx_version(SurfaceTiling) == 11) {
- if (SurfaceTiling == dml2_gfx11_sw_linear) {
- *MacroTileHeightY = *BlockHeight256BytesY;
- *MacroTileWidthY = 256 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 256 / *BytePerPixelC / *MacroTileHeightC;
- }
- } else if (SurfaceTiling == dml2_gfx11_sw_64kb_d || SurfaceTiling == dml2_gfx11_sw_64kb_d_t || SurfaceTiling == dml2_gfx11_sw_64kb_d_x || SurfaceTiling == dml2_gfx11_sw_64kb_r_x) {
- *MacroTileHeightY = 16 * *BlockHeight256BytesY;
- *MacroTileWidthY = 65536 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = 16 * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 65536 / *BytePerPixelC / *MacroTileHeightC;
- }
- } else {
- *MacroTileHeightY = 32 * *BlockHeight256BytesY;
- *MacroTileWidthY = 65536 * 4 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = 32 * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 65536 * 4 / *BytePerPixelC / *MacroTileHeightC;
- }
- }
- } else {
- unsigned int macro_tile_size_bytes = dml_get_tile_block_size_bytes(SurfaceTiling);
- unsigned int macro_tile_scale = 1; // macro tile to 256B req scaling
-
- if (SurfaceTiling == dml2_sw_linear) {
- macro_tile_scale = 1;
- } else if (SurfaceTiling == dml2_sw_4kb_2d) {
- macro_tile_scale = 4;
- } else if (SurfaceTiling == dml2_sw_64kb_2d) {
- macro_tile_scale = 16;
- } else if (SurfaceTiling == dml2_sw_256kb_2d) {
- macro_tile_scale = 32;
- } else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
- }
-
- *MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
- *MacroTileWidthY = macro_tile_size_bytes / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = macro_tile_scale * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = macro_tile_size_bytes / *BytePerPixelC / *MacroTileHeightC;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
-}
-
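- // Summary (descriptive, derived from the body below): a single DPP must run fast enough to cover
- // the vertical-tap workload, the scaling workload (h_ratio * v_ratio limited by PSCL throughput)
- // and the raw pixel rate; using more than 6 taps in either direction forces DPPCLK to at least
- // twice the pixel clock.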
-static void CalculateSinglePipeDPPCLKAndSCLThroughput(
- double HRatio,
- double HRatioChroma,
- double VRatio,
- double VRatioChroma,
- double MaxDCHUBToPSCLThroughput,
- double MaxPSCLToLBThroughput,
- double PixelClock,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int HTaps,
- unsigned int HTapsChroma,
- unsigned int VTaps,
- unsigned int VTapsChroma,
-
- // Output
- double *PSCL_THROUGHPUT,
- double *PSCL_THROUGHPUT_CHROMA,
- double *DPPCLKUsingSingleDPP)
-{
- if (HRatio > 1) {
- *PSCL_THROUGHPUT = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput * HRatio / math_ceil2((double)HTaps / 6.0, 1.0));
- } else {
- *PSCL_THROUGHPUT = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
- }
-
- double DPPCLKUsingSingleDPPLuma;
- double DPPCLKUsingSingleDPPChroma;
-
- DPPCLKUsingSingleDPPLuma = PixelClock * math_max3(VTaps / 6 * math_min2(1, HRatio), HRatio * VRatio / *PSCL_THROUGHPUT, 1);
-
- if ((HTaps > 6 || VTaps > 6) && DPPCLKUsingSingleDPPLuma < 2 * PixelClock)
- DPPCLKUsingSingleDPPLuma = 2 * PixelClock;
-
- if (!dml2_core_shared_is_420(SourcePixelFormat) && SourcePixelFormat != dml2_rgbe_alpha) {
- *PSCL_THROUGHPUT_CHROMA = 0;
- *DPPCLKUsingSingleDPP = DPPCLKUsingSingleDPPLuma;
- } else {
- if (HRatioChroma > 1) {
- *PSCL_THROUGHPUT_CHROMA = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput * HRatioChroma / math_ceil2((double)HTapsChroma / 6.0, 1.0));
- } else {
- *PSCL_THROUGHPUT_CHROMA = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
- }
- DPPCLKUsingSingleDPPChroma = PixelClock * math_max3(VTapsChroma / 6 * math_min2(1, HRatioChroma),
- HRatioChroma * VRatioChroma / *PSCL_THROUGHPUT_CHROMA, 1);
- if ((HTapsChroma > 6 || VTapsChroma > 6) && DPPCLKUsingSingleDPPChroma < 2 * PixelClock)
- DPPCLKUsingSingleDPPChroma = 2 * PixelClock;
- *DPPCLKUsingSingleDPP = math_max2(DPPCLKUsingSingleDPPLuma, DPPCLKUsingSingleDPPChroma);
- }
-}
-
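- // Summary (descriptive): the swath width starts as the viewport extent along the scan direction,
- // is split across ODM segments (or halved for a 2-DPP surface), halved again for chroma on 4:2:0
- // formats, and finally rounded up to the 256B request-block granularity to produce the *_ub
- // ("upper bound") widths used for request counts.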
-static void CalculateSwathWidth(
- const struct dml2_display_cfg *display_cfg,
- bool ForceSingleDPP,
- unsigned int NumberOfActiveSurfaces,
- enum dml2_odm_mode ODMMode[],
- unsigned int BytePerPixY[],
- unsigned int BytePerPixC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- bool surf_linear128_l[],
- bool surf_linear128_c[],
- unsigned int DPPPerSurface[],
-
- // Output
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
- unsigned int SwathWidthSingleDPPC[],
- unsigned int SwathWidthY[], // per-pipe
- unsigned int SwathWidthC[], // per-pipe
- unsigned int MaximumSwathHeightY[],
- unsigned int MaximumSwathHeightC[],
- unsigned int swath_width_luma_ub[], // per-pipe
- unsigned int swath_width_chroma_ub[]) // per-pipe
-{
- enum dml2_odm_mode MainSurfaceODMMode;
- double odm_hactive_factor = 1.0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- } else {
- SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
-
- MainSurfaceODMMode = ODMMode[k];
- for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
- if (display_cfg->plane_descriptors[k].stream_index == j) {
- MainSurfaceODMMode = ODMMode[j];
- }
- }
-
- if (ForceSingleDPP) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- } else {
- if (MainSurfaceODMMode == dml2_odm_mode_combine_4to1)
- odm_hactive_factor = 4.0;
- else if (MainSurfaceODMMode == dml2_odm_mode_combine_3to1)
- odm_hactive_factor = 3.0;
- else if (MainSurfaceODMMode == dml2_odm_mode_combine_2to1)
- odm_hactive_factor = 2.0;
-
- if (MainSurfaceODMMode == dml2_odm_mode_combine_4to1 || MainSurfaceODMMode == dml2_odm_mode_combine_3to1 || MainSurfaceODMMode == dml2_odm_mode_combine_2to1) {
- SwathWidthY[k] = (unsigned int)(math_min2((double)SwathWidthSingleDPPY[k], math_round((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active / odm_hactive_factor * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio)));
- } else if (DPPPerSurface[k] == 2) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
- } else {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
- SwathWidthC[k] = SwathWidthY[k] / 2;
- SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
- } else {
- SwathWidthC[k] = SwathWidthY[k];
- SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
- }
-
- if (ForceSingleDPP == true) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- SwathWidthC[k] = SwathWidthSingleDPPC[k];
- }
-
- unsigned int req_width_horz_y = Read256BytesBlockWidthY[k];
- unsigned int req_width_horz_c = Read256BytesBlockWidthC[k];
-
- if (surf_linear128_l[k])
- req_width_horz_y = req_width_horz_y / 2;
-
- if (surf_linear128_c[k])
- req_width_horz_c = req_width_horz_c / 2;
-
- unsigned int surface_width_ub_l = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane0.width, req_width_horz_y);
- unsigned int surface_height_ub_l = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane0.height, Read256BytesBlockHeightY[k]);
- unsigned int surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
- unsigned int surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
-
- req_per_swath_ub_l[k] = 0;
- req_per_swath_ub_c[k] = 0;
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
- MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_width_ub_l, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start + SwathWidthY[k] + req_width_horz_y - 1, req_width_horz_y) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start, req_width_horz_y)));
- } else {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_width_ub_l, math_ceil2((double)SwathWidthY[k] - 1, req_width_horz_y) + req_width_horz_y));
- }
- req_per_swath_ub_l[k] = swath_width_luma_ub[k] / req_width_horz_y;
-
- if (BytePerPixC[k] > 0) {
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_width_ub_c, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start + SwathWidthC[k] + req_width_horz_c - 1, req_width_horz_c) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start, req_width_horz_c)));
- } else {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_width_ub_c, math_ceil2((double)SwathWidthC[k] - 1, req_width_horz_c) + req_width_horz_c));
- }
- req_per_swath_ub_c[k] = swath_width_chroma_ub[k] / req_width_horz_c;
- } else {
- swath_width_chroma_ub[k] = 0;
- }
- } else {
- MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
- MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
-
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_height_ub_l, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start + SwathWidthY[k] + Read256BytesBlockHeightY[k] - 1, Read256BytesBlockHeightY[k]) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start, Read256BytesBlockHeightY[k])));
- } else {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_height_ub_l, math_ceil2((double)SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]));
- }
- req_per_swath_ub_l[k] = swath_width_luma_ub[k] / Read256BytesBlockHeightY[k];
- if (BytePerPixC[k] > 0) {
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_height_ub_c, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start + SwathWidthC[k] + Read256BytesBlockHeightC[k] - 1, Read256BytesBlockHeightC[k]) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start, Read256BytesBlockHeightC[k])));
- } else {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_height_ub_c, math_ceil2((double)SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]));
- }
- req_per_swath_ub_c[k] = swath_width_chroma_ub[k] / Read256BytesBlockHeightC[k];
- } else {
- swath_width_chroma_ub[k] = 0;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
- }
-}
-
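- // Note (descriptive): unbounded requests are only permitted with a single active DPP and no
- // chroma/linear surface; the force override can veto but never enable them beyond that
- // (unb_req_en = unb_req_force_val && unb_req_ok).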
-static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsigned int TotalNumberOfActiveDPP, bool NoChromaOrLinear)
-{
- bool unb_req_ok = false;
- bool unb_req_en = false;
-
- unb_req_ok = (TotalNumberOfActiveDPP == 1 && NoChromaOrLinear);
- unb_req_en = unb_req_ok;
-
- if (unb_req_force_en) {
- unb_req_en = unb_req_force_val && unb_req_ok;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
-}
-
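- // Summary (descriptive): each non-phantom surface first gets the minimum DET needed to hold two
- // full swaths (or its explicit override); the remaining pool is then handed out to surfaces in
- // order of increasing read bandwidth, each receiving a share proportional to its bandwidth,
- // unless minimize_det_reallocation lets a per-stream pixel-rate-based budget be used instead.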
-static void CalculateDETBufferSize(struct dml2_core_shared_calculate_det_buffer_size_params *p)
-{
- unsigned int DETBufferSizePoolInKByte;
- unsigned int NextDETBufferPieceInKByte;
- bool DETPieceAssignedToThisSurfaceAlready[DML2_MAX_PLANES];
- bool NextPotentialSurfaceToAssignDETPieceFound;
- unsigned int NextSurfaceToAssignDETPiece;
- double TotalBandwidth;
- double BandwidthOfSurfacesNotAssignedDETPiece;
- unsigned int max_minDET;
- unsigned int minDET;
- unsigned int minDET_pipe;
- unsigned int TotalBandwidthPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int TotalPixelRate = 0;
- unsigned int DETBudgetPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int RemainingDETBudgetPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int IdealDETBudget, DeltaDETBudget;
- bool MinimizeReallocationSuccess = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, p->NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, p->MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, p->MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, p->CompressedBufferSegmentSizeInkByte);
-#endif
-
- // Note: will use the default DET size if it can hold 2 full swaths
- if (p->UnboundedRequestEnabled) {
- if (p->display_cfg->plane_descriptors[0].overrides.det_size_override_kb > 0) {
- p->DETBufferSizeInKByte[0] = p->display_cfg->plane_descriptors[0].overrides.det_size_override_kb;
- } else {
- p->DETBufferSizeInKByte[0] = (unsigned int)math_max2(128.0, math_ceil2(2.0 * ((double)p->full_swath_bytes_l[0] + (double)p->full_swath_bytes_c[0]) / 1024.0, p->ConfigReturnBufferSegmentSizeInkByte));
- }
- *p->CompressedBufferSizeInkByte = p->ConfigReturnBufferSizeInKByte - p->DETBufferSizeInKByte[0];
- } else {
- DETBufferSizePoolInKByte = p->MaxTotalDETInKByte;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DETBufferSizeInKByte[k] = 0;
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format)) {
- max_minDET = p->nomDETInKByte - p->ConfigReturnBufferSegmentSizeInkByte;
- } else {
- max_minDET = p->nomDETInKByte;
- }
- minDET = 128;
- minDET_pipe = 0;
-
- // add DET resources until it can hold 2 full swaths
- while (minDET <= max_minDET && minDET_pipe == 0) {
- if (2.0 * ((double)p->full_swath_bytes_l[k] + (double)p->full_swath_bytes_c[k]) / 1024.0 <= minDET)
- minDET_pipe = minDET;
- minDET = minDET + p->ConfigReturnBufferSegmentSizeInkByte;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
-#endif
-
- if (minDET_pipe == 0) {
- minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)p->full_swath_bytes_l[k] + (double)p->full_swath_bytes_c[k]) / 1024.0, p->ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane takes half the DET)\n", __func__, k, minDET_pipe);
-#endif
- }
-
- if (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- p->DETBufferSizeInKByte[k] = 0;
- } else if (p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb > 0) {
- p->DETBufferSizeInKByte[k] = p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb;
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb;
- } else if ((p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * minDET_pipe <= DETBufferSizePoolInKByte) {
- p->DETBufferSizeInKByte[k] = minDET_pipe;
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * minDET_pipe;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, DETBufferSizePoolInKByte);
-#endif
- }
-
- if (p->display_cfg->minimize_det_reallocation) {
- MinimizeReallocationSuccess = true;
- // To minimize DET reallocation, we don't distribute based on each surface's bandwidth relative to the global total,
- // but rather distribute DET across streams proportionally to pixel rate, and only distribute based on
- // bandwidth between the planes on the same stream. This ensures that large-scale re-distribution happens only on a
- // stream count and/or pixel rate change, which is much less likely than general per-plane bandwidth changes.
-
- // Calculate total pixel rate
- for (unsigned int k = 0; k < p->display_cfg->num_streams; ++k) {
- TotalPixelRate += p->display_cfg->stream_descriptors[k].timing.pixel_clock_khz;
- }
-
- // Calculate per stream DET budget
- for (unsigned int k = 0; k < p->display_cfg->num_streams; ++k) {
- DETBudgetPerStream[k] = (unsigned int)((double)p->display_cfg->stream_descriptors[k].timing.pixel_clock_khz * p->MaxTotalDETInKByte / TotalPixelRate);
- RemainingDETBudgetPerStream[k] = DETBudgetPerStream[k];
- }
-
- // Calculate the per stream total bandwidth
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- TotalBandwidthPerStream[p->display_cfg->plane_descriptors[k].stream_index] += (unsigned int)(p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]);
-
- // Check the minimum can be satisfied by budget
- if (RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] >= p->DETBufferSizeInKByte[k]) {
- RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] -= p->DETBufferSizeInKByte[k];
- } else {
- MinimizeReallocationSuccess = false;
- break;
- }
- }
- }
-
- if (MinimizeReallocationSuccess) {
- // Since a fixed budget per stream is sufficient to satisfy the minimums, just re-distribute each stream's
- // budget proportionally across its planes
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- IdealDETBudget = (unsigned int)(((p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]) / TotalBandwidthPerStream[p->display_cfg->plane_descriptors[k].stream_index])
- * DETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index]);
-
- if (IdealDETBudget > p->DETBufferSizeInKByte[k]) {
- DeltaDETBudget = IdealDETBudget - p->DETBufferSizeInKByte[k];
- if (DeltaDETBudget > RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index])
- DeltaDETBudget = RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index];
-
- p->DETBufferSizeInKByte[k] += DeltaDETBudget;
- RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] -= DeltaDETBudget;
- }
-
- // Split among the pipes per the plane
- p->DETBufferSizeInKByte[k] = (unsigned int)((double)p->DETBufferSizeInKByte[k] / (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]));
-
- // Round down to segment size
- p->DETBufferSizeInKByte[k] = (p->DETBufferSizeInKByte[k] / p->CompressedBufferSegmentSizeInkByte) * p->CompressedBufferSegmentSizeInkByte;
- }
- }
- }
- }
-
- if (!MinimizeReallocationSuccess) {
- TotalBandwidth = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- TotalBandwidth = TotalBandwidth + p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k];
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- }
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, TotalBandwidth);
- BandwidthOfSurfacesNotAssignedDETPiece = TotalBandwidth;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-
- if (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- DETPieceAssignedToThisSurfaceAlready[k] = true;
- } else if (p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb > 0 || (((double)(p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * (double)p->DETBufferSizeInKByte[k] / (double)p->MaxTotalDETInKByte) >= ((p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]) / TotalBandwidth))) {
- DETPieceAssignedToThisSurfaceAlready[k] = true;
- BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece - p->ReadBandwidthLuma[k] - p->ReadBandwidthChroma[k];
- } else {
- DETPieceAssignedToThisSurfaceAlready[k] = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
- }
-
- for (unsigned int j = 0; j < p->NumberOfActiveSurfaces; ++j) {
- NextPotentialSurfaceToAssignDETPieceFound = false;
- NextSurfaceToAssignDETPiece = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, p->ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, p->ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, NextSurfaceToAssignDETPiece);
-#endif
- if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
- p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k] < p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece])) {
- NextSurfaceToAssignDETPiece = k;
- NextPotentialSurfaceToAssignDETPieceFound = true;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
- }
-
- if (NextPotentialSurfaceToAssignDETPieceFound) {
- NextDETBufferPieceInKByte = (unsigned int)(math_min2(
- math_round((double)DETBufferSizePoolInKByte * (p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]) / BandwidthOfSurfacesNotAssignedDETPiece /
- ((p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte))
- * (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte,
- math_floor2((double)DETBufferSizePoolInKByte, (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte)));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, NextSurfaceToAssignDETPiece, p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, NextSurfaceToAssignDETPiece, p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, NextSurfaceToAssignDETPiece, p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
-#endif
-
- p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] = p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] + NextDETBufferPieceInKByte / (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
-#endif
-
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - NextDETBufferPieceInKByte;
- DETPieceAssignedToThisSurfaceAlready[NextSurfaceToAssignDETPiece] = true;
- BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece - (p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- }
- }
- }
- *p->CompressedBufferSizeInkByte = p->MinCompressedBufferSizeInKByte;
- }
- *p->CompressedBufferSizeInkByte = *p->CompressedBufferSizeInkByte * p->CompressedBufferSegmentSizeInkByte / p->ConfigReturnBufferSegmentSizeInkByte;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, p->DETBufferSizeInKByte[k], p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]);
- }
-#endif
-}
-
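- // Illustrative example (hypothetical value): with 4:1 ODM combine, a 2376 MHz pixel rate only
- // needs 594 MHz of DISPCLK per ODM segment (2376 / 4).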
-static double CalculateRequiredDispclk(
- enum dml2_odm_mode ODMMode,
- double PixelClock)
-{
-
- if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
- } else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
- } else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
- } else {
- return PixelClock;
- }
-}
-
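- // Summary (descriptive): when DesiredBPP is 0 this picks the largest bpp the link can carry,
- // truncated to 1/16 bpp granularity with DSC (capped at MaxDSCBPP) or to the highest standard
- // non-DSC bpp otherwise, and returns __DML2_CALCS_DPP_INVALID__ if even the minimum does not fit;
- // a non-zero DesiredBPP is only validated against the allowed values, never adjusted.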
-static double TruncToValidBPP(
- struct dml2_core_shared_TruncToValidBPP_locals *l,
- double LinkBitRate,
- unsigned int Lanes,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClock,
- double DesiredBPP,
- bool DSCEnable,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class Format,
- unsigned int DSCInputBitPerComponent,
- unsigned int DSCSlices,
- unsigned int AudioRate,
- unsigned int AudioLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
-
- // Output
- unsigned int *RequiredSlots)
-{
- double MaxLinkBPP;
- unsigned int MinDSCBPP;
- double MaxDSCBPP;
- unsigned int NonDSCBPP0;
- unsigned int NonDSCBPP1;
- unsigned int NonDSCBPP2;
- enum dml2_odm_mode ODMMode;
-
- if (Format == dml2_420) {
- NonDSCBPP0 = 12;
- NonDSCBPP1 = 15;
- NonDSCBPP2 = 18;
- MinDSCBPP = 6;
- MaxDSCBPP = 16;
- } else if (Format == dml2_444) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 30;
- NonDSCBPP2 = 36;
- MinDSCBPP = 8;
- MaxDSCBPP = 16;
- } else {
- if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- } else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
- if (Format == dml2_n422 || Output == dml2_hdmifrl) {
- MinDSCBPP = 7;
- MaxDSCBPP = 16;
- } else {
- MinDSCBPP = 8;
- MaxDSCBPP = 16;
- }
- }
- if (Output == dml2_dp2p0) {
- MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
- } else if (DSCEnable && Output == dml2_dp) {
- MaxLinkBPP = LinkBitRate / 10.0 * 8.0 * Lanes / PixelClock * (1 - 2.4 / 100);
- } else {
- MaxLinkBPP = LinkBitRate / 10.0 * 8.0 * Lanes / PixelClock;
- }
-
- ODMMode = DSCEnable ? ODMModeDSC : ODMModeNoDSC;
-
- if (ODMMode == dml2_odm_mode_split_1to2) {
- MaxLinkBPP = 2 * MaxLinkBPP;
- }
-
- if (DesiredBPP == 0) {
- if (DSCEnable) {
- if (MaxLinkBPP < MinDSCBPP) {
- return __DML2_CALCS_DPP_INVALID__;
- } else if (MaxLinkBPP >= MaxDSCBPP) {
- return MaxDSCBPP;
- } else {
- return math_floor2(16.0 * MaxLinkBPP, 1.0) / 16.0;
- }
- } else {
- if (MaxLinkBPP >= NonDSCBPP2) {
- return NonDSCBPP2;
- } else if (MaxLinkBPP >= NonDSCBPP1) {
- return NonDSCBPP1;
- } else if (MaxLinkBPP >= NonDSCBPP0) {
- return NonDSCBPP0;
- } else {
- return __DML2_CALCS_DPP_INVALID__;
- }
- }
- } else {
- if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP == NonDSCBPP0)) ||
- (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
- return __DML2_CALCS_DPP_INVALID__;
- } else {
- return DesiredBPP;
- }
- }
-}
-
-// updated for dcn4
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double BPP,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum dml2_output_format_class pixelFormat,
- enum dml2_output_encoder_class Output)
-{
- // valid bpc = source bits per component in the set of {8, 10, 12}
- // valid bpp = increments of 1/16 of a bit
- // min = 6/7/8 in N420/N422/444, respectively
- // max = such that compression is 1:1
- //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
- //valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
- //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
-
- // fixed value
- unsigned int rcModelSize = 8192;
-
- // N422/N420 operate at 2 pixels per clock
- unsigned int pixelsPerClock, padding_pixels, ssm_group_priming_delay, ssm_pipeline_delay, obsm_pipeline_delay, slice_padded_pixels, ixd_plus_padding, ixd_plus_padding_groups, cycles_per_group, group_delay, pipeline_delay, pixels, additional_group_delay, lines_to_reach_ixd, groups_to_reach_ixd, slice_width_groups, initial_xmit_delay, number_of_lines_to_reach_ixd, slice_width_modified;
-
-
- if (pixelFormat == dml2_420)
- pixelsPerClock = 2;
- // all other modes operate at 1 pixel per clock
- else if (pixelFormat == dml2_444)
- pixelsPerClock = 1;
- else if (pixelFormat == dml2_n422 || Output == dml2_hdmifrl)
- pixelsPerClock = 2;
- else
- pixelsPerClock = 1;
-
- //initial transmit delay as per PPS
- initial_xmit_delay = (unsigned int)(math_round(rcModelSize / 2.0 / BPP / pixelsPerClock));
-
- //slice width as seen by dscc_bcl in pixels or pixel pairs (depending on the number of pixels per pixel container, based on pixel format)
- slice_width_modified = (pixelFormat == dml2_444 || pixelFormat == dml2_420 || Output == dml2_hdmifrl) ? sliceWidth / 2 : sliceWidth;
-
- padding_pixels = ((slice_width_modified % 3) != 0) ? (3 - (slice_width_modified % 3)) * (initial_xmit_delay / slice_width_modified) : 0;
-
- if ((3.0 * pixelsPerClock * BPP) >= ((double)((initial_xmit_delay + 2) / 3) * (double)(3 + (pixelFormat == dml2_n422)))) {
- if ((initial_xmit_delay + padding_pixels) % 3 == 1) {
- initial_xmit_delay++;
- }
- }
-
-
- //sub-stream multiplexer balance fifo priming delay in groups as per dsc standard
- if (bpc == 8)
- ssm_group_priming_delay = 83;
- else if (bpc == 10)
- ssm_group_priming_delay = 91;
- else if (bpc == 12)
- ssm_group_priming_delay = 115;
- else if (bpc == 14)
- ssm_group_priming_delay = 123;
- else
- ssm_group_priming_delay = 128;
-
- //slice width in groups is rounded up to the nearest group as DSC adds padded pixels such that there is an integer number of groups per slice
- slice_width_groups = (slice_width_modified + 2) / 3;
-
- //determine the number of padded pixels in the last group of a slice line, computed as follows
- slice_padded_pixels = 3 * slice_width_groups - slice_width_modified;
-
- //determine integer number of complete slice lines required to reach initial transmit delay without ssm delay considered
- number_of_lines_to_reach_ixd = initial_xmit_delay / slice_width_modified;
-
- //increase initial transmit delay by the number of padded pixels added to a slice line multiplied by the integer number of complete lines to reach initial transmit delay
- //this step is necessary as each padded pixel added takes up a clock cycle and, therefore, adds to the overall delay
- ixd_plus_padding = initial_xmit_delay + slice_padded_pixels * number_of_lines_to_reach_ixd;
-
- //convert the padded initial transmit delay from pixels to groups by rounding up to the nearest group as DSC processes in groups of pixels
- ixd_plus_padding_groups = (ixd_plus_padding + 2) / 3;
-
- //number of groups required for a slice to reach initial transmit delay is the padded initial transmit delay (in groups) plus the ssm group priming delay
- groups_to_reach_ixd = ixd_plus_padding_groups + ssm_group_priming_delay;
-
-
- //number of lines required to reach the padded initial transmit delay (in groups) in the slices to the left of the last horizontal slice
- //needs to be rounded up, as complete slice lines are buffered before the initial transmit delay is reached in the last horizontal slice
- lines_to_reach_ixd = (groups_to_reach_ixd + slice_width_groups - 1) / slice_width_groups; //round up lines to reach ixd to next
-
- //determine whether a non-zero number of pixels is reached in the group where the initial transmit delay is reached
- //an additional group time (i.e., 3 pixel times) is required before the first output if there are no additional pixels beyond initial transmit delay
- additional_group_delay = ((initial_xmit_delay - number_of_lines_to_reach_ixd * slice_width_modified) % 3) == 0 ? 1 : 0;
-
- //number of pipeline delay cycles in the ssm block (can be determined empirically or analytically by inspecting the ssm block)
- ssm_pipeline_delay = 2;
-
- //number of pipe delay cycles in the obsm block (can be determined empirically or analytically by inspecting the obsm block)
- obsm_pipeline_delay = 1;
-
- //a group of pixels is worth 6 pixels in N422/N420 mode or 3 pixels in all other modes
- if (pixelFormat == dml2_420 || pixelFormat == dml2_444 || pixelFormat == dml2_n422 || Output == dml2_hdmifrl)
- cycles_per_group = 6;
- else
- cycles_per_group = 3;
-	//delay of the bit stream construction layer in pixels is the sum of:
-	//1. number of pixel containers in a slice line multiplied by the number of lines required to reach initial transmit delay, multiplied by the number of slices to the left of the last horizontal slice
- //2. number of pixel containers required to reach initial transmit delay (specifically, in the last horizontal slice)
- //3. additional group of delay if initial transmit delay is reached exactly in a group
- //4. ssm and obsm pipeline delay (i.e., clock cycles of delay)
- group_delay = (lines_to_reach_ixd * slice_width_groups * (numSlices - 1)) + groups_to_reach_ixd + additional_group_delay;
- pipeline_delay = ssm_pipeline_delay + obsm_pipeline_delay;
-
-	//pixel delay is group_delay (converted to pixels) plus the pipeline delay; however, the first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
- pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
- return pixels;
-}
-
-
-//updated in dcn4
-static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output)
-{
- unsigned int Delay = 0;
- unsigned int dispclk_per_dscclk = 3;
-
- // sfr
- Delay = Delay + 2;
-
- if (pixelFormat == dml2_420 || pixelFormat == dml2_n422 || (Output == dml2_hdmifrl && pixelFormat != dml2_444)) {
- dispclk_per_dscclk = 3 * 2;
- }
-
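-	// accumulate the fixed delay of each dscc sub-block; cycles spent in the dscclk domain are scaled to dispclk cycles via dispclk_per_dscclk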
- if (pixelFormat == dml2_420) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
-
- // dscc - input deserializer
- Delay = Delay + 5;
-
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
-
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
-
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else if (pixelFormat == dml2_n422 || (Output == dml2_hdmifrl && pixelFormat != dml2_444)) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
- // dsccif
- Delay = Delay + 1;
- // dscc - input deserializer
- Delay = Delay + 5;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
-
-
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else if (pixelFormat == dml2_s422) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 17 * dispclk_per_dscclk;
-
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
-
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- }
-
- // sft
- Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
-
- return Delay;
-}
-
-static unsigned int CalculateHostVMDynamicLevels(
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMinPageSize,
- unsigned int HostVMMaxNonCachedPageTableLevels)
-{
- unsigned int HostVMDynamicLevels = 0;
-
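-	// number of host VM page table levels that must be fetched dynamically:
-	// larger HostVMMinPageSize values drop one, then two, levels from the non-cached level count (clamped at zero)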
- if (GPUVMEnable && HostVMEnable) {
- if (HostVMMinPageSize < 2048)
- HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
- else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576)
- HostVMDynamicLevels = (unsigned int)math_max2(0, (double)HostVMMaxNonCachedPageTableLevels - 1);
- else
- HostVMDynamicLevels = (unsigned int)math_max2(0, (double)HostVMMaxNonCachedPageTableLevels - 2);
- } else {
- HostVMDynamicLevels = 0;
- }
- return HostVMDynamicLevels;
-}
-
-static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_and_row_bytes_params *p)
-{
- unsigned int extra_dpde_bytes;
- unsigned int extra_mpde_bytes;
- unsigned int MacroTileSizeBytes;
- unsigned int vp_height_dpte_ub;
-
- unsigned int meta_surface_bytes;
- unsigned int vm_bytes;
- unsigned int vp_height_meta_ub;
-
- *p->MetaRequestHeight = 8 * p->BlockHeight256Bytes;
- *p->MetaRequestWidth = 8 * p->BlockWidth256Bytes;
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->meta_row_height = 32;
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->MetaRequestWidth - 1, *p->MetaRequestWidth) - math_floor2(p->ViewportXStart, *p->MetaRequestWidth));
-		*p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestHeight * p->BytePerPixel / 256.0); // FIXME_DCN4SW: missing in old code, but no DCC for linear anyway?
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- *p->meta_row_height = *p->MetaRequestHeight;
- if (p->ViewportStationary && p->NumberOfDPPs == 1) {
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->MetaRequestWidth - 1, *p->MetaRequestWidth) - math_floor2(p->ViewportXStart, *p->MetaRequestWidth));
- } else {
- *p->meta_row_width = (unsigned int)(math_ceil2(p->SwathWidth - 1, *p->MetaRequestWidth) + *p->MetaRequestWidth);
- }
- *p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestHeight * p->BytePerPixel / 256.0);
- } else {
- *p->meta_row_height = *p->MetaRequestWidth;
- if (p->ViewportStationary && p->NumberOfDPPs == 1) {
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + *p->MetaRequestHeight - 1, *p->MetaRequestHeight) - math_floor2(p->ViewportYStart, *p->MetaRequestHeight));
- } else {
- *p->meta_row_width = (unsigned int)(math_ceil2(p->SwathWidth - 1, *p->MetaRequestHeight) + *p->MetaRequestHeight);
- }
- *p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestWidth * p->BytePerPixel / 256.0);
- }
-
- if (p->ViewportStationary && p->is_phantom && (p->NumberOfDPPs == 1 || !dml_is_vertical_rotation(p->RotationAngle))) {
- vp_height_meta_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + 64 * p->BlockHeight256Bytes - 1, 64 * p->BlockHeight256Bytes) - math_floor2(p->ViewportYStart, 64 * p->BlockHeight256Bytes));
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- vp_height_meta_ub = (unsigned int)(math_ceil2(p->ViewportHeight - 1, 64 * p->BlockHeight256Bytes) + 64 * p->BlockHeight256Bytes);
- } else {
- vp_height_meta_ub = (unsigned int)(math_ceil2(p->SwathWidth - 1, 64 * p->BlockHeight256Bytes) + 64 * p->BlockHeight256Bytes);
- }
-
- meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
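-	// upper bound on meta PTE bytes per frame: one 64-byte PTE request per 8 meta VM pages of meta surface, plus one extra request for the unaligned start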
- if (p->GPUVMEnable == true) {
- double meta_vmpg_bytes = 4.0 * 1024.0;
- *p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double)(meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
- extra_mpde_bytes = 128 * (p->GPUVMMaxPageTableLevels - 1);
- } else {
- *p->meta_pte_bytes_per_frame_ub = 0;
- extra_mpde_bytes = 0;
- }
-
- if (!p->DCCEnable || !p->mrq_present) {
- *p->meta_pte_bytes_per_frame_ub = 0;
- extra_mpde_bytes = 0;
- *p->meta_row_bytes = 0;
- }
-
- if (!p->GPUVMEnable) {
- *p->PixelPTEBytesPerRow = 0;
- *p->PixelPTEBytesPerRowStorage = 0;
- *p->dpte_row_width_ub = 0;
- *p->dpte_row_height = 0;
- *p->dpte_row_height_linear = 0;
- *p->PixelPTEBytesPerRow_one_row_per_frame = 0;
- *p->dpte_row_width_ub_one_row_per_frame = 0;
- *p->dpte_row_height_one_row_per_frame = 0;
- *p->vmpg_width = 0;
- *p->vmpg_height = 0;
- *p->PixelPTEReqWidth = 0;
- *p->PixelPTEReqHeight = 0;
- *p->PTERequestSize = 0;
- *p->dpde0_bytes_per_frame_ub = 0;
- return 0;
- }
-
- MacroTileSizeBytes = p->MacroTileWidth * p->BytePerPixel * p->MacroTileHeight;
-
- if (p->ViewportStationary && p->is_phantom && (p->NumberOfDPPs == 1 || !dml_is_vertical_rotation(p->RotationAngle))) {
- vp_height_dpte_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + p->MacroTileHeight - 1, p->MacroTileHeight) - math_floor2(p->ViewportYStart, p->MacroTileHeight));
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- vp_height_dpte_ub = (unsigned int)(math_ceil2((double)p->ViewportHeight - 1, p->MacroTileHeight) + p->MacroTileHeight);
- } else {
- vp_height_dpte_ub = (unsigned int)(math_ceil2((double)p->SwathWidth - 1, p->MacroTileHeight) + p->MacroTileHeight);
- }
-
- if (p->GPUVMEnable == true && p->GPUVMMaxPageTableLevels > 1) {
- *p->dpde0_bytes_per_frame_ub = (unsigned int)(64 * (math_ceil2((double)(p->Pitch * vp_height_dpte_ub * p->BytePerPixel - MacroTileSizeBytes) / (double)(8 * 2097152), 1) + 1));
- extra_dpde_bytes = 128 * (p->GPUVMMaxPageTableLevels - 2);
- } else {
- *p->dpde0_bytes_per_frame_ub = 0;
- extra_dpde_bytes = 0;
- }
-
- vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
-
- unsigned int PixelPTEReqWidth_linear = 0; // VBA_DELTA. VBA doesn't calculate this
-
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->PixelPTEReqHeight = 1;
- *p->PixelPTEReqWidth = p->GPUVMMinPageSizeKBytes * 1024 * 8 / p->BytePerPixel;
- PixelPTEReqWidth_linear = p->GPUVMMinPageSizeKBytes * 1024 * 8 / p->BytePerPixel;
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = 1;
- *p->vmpg_width = p->GPUVMMinPageSizeKBytes * 1024 / p->BytePerPixel;
- } else if (p->GPUVMMinPageSizeKBytes * 1024 >= dml_get_tile_block_size_bytes(p->SurfaceTiling)) { // 1 64B 8x1 PTE
- *p->PixelPTEReqHeight = p->MacroTileHeight;
- *p->PixelPTEReqWidth = 8 * 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = p->MacroTileHeight;
- *p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
-
- } else if (p->GPUVMMinPageSizeKBytes == 4 && dml_get_tile_block_size_bytes(p->SurfaceTiling) == 65536) { // 2 64B PTE requests to get 16 PTEs to cover the 64K tile
- // one 64KB tile, is 16x16x256B req
- *p->PixelPTEReqHeight = 16 * p->BlockHeight256Bytes;
- *p->PixelPTEReqWidth = 16 * p->BlockWidth256Bytes;
- *p->PTERequestSize = 128;
-
- *p->vmpg_height = *p->PixelPTEReqHeight;
- *p->vmpg_width = *p->PixelPTEReqWidth;
- } else {
-		// default so the rest of the calculation can go through; when VM is disabled, the calculated PTE-related values shouldn't be used anyway
- *p->PixelPTEReqHeight = p->MacroTileHeight;
- *p->PixelPTEReqWidth = 8 * 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = p->MacroTileHeight;
- *p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
-
- if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
- __func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
-
- *p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
- *p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
- *p->PixelPTEBytesPerRow_one_row_per_frame = (unsigned int)((double)*p->dpte_row_width_ub_one_row_per_frame / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
-
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->dpte_row_height = (unsigned int)(math_min2(128, (double)(1ULL << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * *p->PixelPTEReqWidth / p->Pitch), 2.0), 1))));
- *p->dpte_row_width_ub = (unsigned int)(math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height - 1), (double)*p->PixelPTEReqWidth) + *p->PixelPTEReqWidth);
- *p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
- *p->dpte_row_height_linear = 0;
-
- // VBA_DELTA, VBA doesn't have programming value for pte row height linear.
- *p->dpte_row_height_linear = (unsigned int)1 << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
- if (*p->dpte_row_height_linear > 128)
- *p->dpte_row_height_linear = 128;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
-#endif
-
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- *p->dpte_row_height = *p->PixelPTEReqHeight;
-
- if (p->GPUVMMinPageSizeKBytes > 64) {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * *p->PixelPTEReqWidth);
- } else if (p->ViewportStationary && (p->NumberOfDPPs == 1)) {
- *p->dpte_row_width_ub = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->PixelPTEReqWidth - 1, *p->PixelPTEReqWidth) - math_floor2(p->ViewportXStart, *p->PixelPTEReqWidth));
- } else {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
-#endif
-
- *p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
- } else {
- *p->dpte_row_height = (unsigned int)(math_min2(*p->PixelPTEReqWidth, p->MacroTileWidth));
-
- if (p->ViewportStationary && (p->NumberOfDPPs == 1)) {
- *p->dpte_row_width_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + *p->PixelPTEReqHeight - 1, *p->PixelPTEReqHeight) - math_floor2(p->ViewportYStart, *p->PixelPTEReqHeight));
- } else {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqHeight, 1) + 1) * *p->PixelPTEReqHeight);
- }
-
- *p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
-#endif
- }
-
- if (p->GPUVMEnable != true) {
- *p->PixelPTEBytesPerRow = 0;
- *p->PixelPTEBytesPerRow_one_row_per_frame = 0;
- }
-
- *p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
-#endif
-
- return vm_bytes;
-} // CalculateVMAndRowBytes
-
-static unsigned int CalculatePrefetchSourceLines(
- double VRatio,
- unsigned int VTaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- enum dml2_rotation_angle RotationAngle,
- bool mirrored,
- bool ViewportStationary,
- unsigned int SwathWidth,
- unsigned int ViewportHeight,
- unsigned int ViewportXStart,
- unsigned int ViewportYStart,
-
- // Output
- unsigned int *VInitPreFill,
- unsigned int *MaxNumSwath)
-{
-
- unsigned int vp_start_rot = 0;
- unsigned int sw0_tmp = 0;
- unsigned int MaxPartialSwath = 0;
- double numLines = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
-#endif
- if (ProgressiveToInterlaceUnitInOPP)
- *VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
- else
- *VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1 + (Interlace ? 1 : 0) * 0.5 * VRatio) / 2.0, 1));
-
- if (ViewportStationary) {
- if (RotationAngle == dml2_rotation_180) {
- vp_start_rot = SwathHeight - (((unsigned int)(ViewportYStart + ViewportHeight - 1) % SwathHeight) + 1);
- } else if ((RotationAngle == dml2_rotation_270 && !mirrored) || (RotationAngle == dml2_rotation_90 && mirrored)) {
- vp_start_rot = ViewportXStart;
- } else if ((RotationAngle == dml2_rotation_90 && !mirrored) || (RotationAngle == dml2_rotation_270 && mirrored)) {
- vp_start_rot = SwathHeight - (((unsigned int)(ViewportYStart + SwathWidth - 1) % SwathHeight) + 1);
- } else {
- vp_start_rot = ViewportYStart;
- }
- sw0_tmp = SwathHeight - (vp_start_rot % SwathHeight);
- if (sw0_tmp < *VInitPreFill) {
- *MaxNumSwath = (unsigned int)(math_ceil2((*VInitPreFill - sw0_tmp) / (double)SwathHeight, 1) + 1);
- } else {
- *MaxNumSwath = 1;
- }
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(vp_start_rot + *VInitPreFill - 1) % SwathHeight));
- } else {
- *MaxNumSwath = (unsigned int)(math_ceil2((*VInitPreFill - 1.0) / (double)SwathHeight, 1) + 1);
- if (*VInitPreFill > 1) {
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(*VInitPreFill - 2) % SwathHeight));
- } else {
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(*VInitPreFill + SwathHeight - 2) % SwathHeight));
- }
- }
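-	// prefetch source lines: all full swaths needed to cover VInitPreFill plus the trailing partial swath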
- numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
-#endif
- return (unsigned int)(numLines);
-
-}
-
-static void CalculateRowBandwidth(
- bool GPUVMEnable,
- bool use_one_row_for_frame,
- enum dml2_source_format_class SourcePixelFormat,
- double VRatio,
- double VRatioChroma,
- bool DCCEnable,
- double LineTime,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
-
- bool mrq_present,
- unsigned int meta_row_bytes_per_row_ub_l,
- unsigned int meta_row_bytes_per_row_ub_c,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
-
- // Output
- double *dpte_row_bw,
- double *meta_row_bw)
-{
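-	// row fetch bandwidths: meta/PTE bytes per row divided by the time to scan one row height (row height * line time),
-	// scaled by the vertical ratio; chroma rows are added for dual-plane (420 / rgbe_alpha) formats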
- if (!DCCEnable || !mrq_present) {
- *meta_row_bw = 0;
- } else if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) {
- *meta_row_bw = VRatio * meta_row_bytes_per_row_ub_l / (meta_row_height_luma * LineTime)
- + VRatioChroma * meta_row_bytes_per_row_ub_c / (meta_row_height_chroma * LineTime);
- } else {
- *meta_row_bw = VRatio * meta_row_bytes_per_row_ub_l / (meta_row_height_luma * LineTime);
- }
-
- if (GPUVMEnable != true) {
- *dpte_row_bw = 0;
- } else if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
- + VRatioChroma * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime);
- } else {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
- }
-}
-
-static void CalculateMALLUseForStaticScreen(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int SurfaceSizeInMALL[],
- bool one_row_per_frame_fits_in_buffer[],
-
- // Output
- bool is_using_mall_for_ss[])
-{
-
- unsigned int SurfaceToAddToMALL;
- bool CanAddAnotherSurfaceToMALL;
- unsigned int TotalSurfaceSizeInMALL;
-
- TotalSurfaceSizeInMALL = 0;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- is_using_mall_for_ss[k] = (display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable);
- if (is_using_mall_for_ss[k])
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
-#endif
- }
-
- SurfaceToAddToMALL = 0;
- CanAddAnotherSurfaceToMALL = true;
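-	// greedy pass: repeatedly add the smallest remaining eligible surface that still fits in the MALL allocation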
- while (CanAddAnotherSurfaceToMALL) {
- CanAddAnotherSurfaceToMALL = false;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k] <= MALLAllocatedForDCN * 1024 * 1024 &&
- !is_using_mall_for_ss[k] && display_cfg->plane_descriptors[k].overrides.refresh_from_mall != dml2_refresh_from_mall_mode_override_force_disable && one_row_per_frame_fits_in_buffer[k] &&
- (!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
- CanAddAnotherSurfaceToMALL = true;
- SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
- }
- }
- if (CanAddAnotherSurfaceToMALL) {
- is_using_mall_for_ss[SurfaceToAddToMALL] = true;
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
-#endif
- }
- }
-}
-
-static void CalculateDCCConfiguration(
- bool DCCEnabled,
- bool DCCProgrammingAssumesScanDirectionUnknown,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int SurfaceWidthLuma,
- unsigned int SurfaceWidthChroma,
- unsigned int SurfaceHeightLuma,
- unsigned int SurfaceHeightChroma,
- unsigned int nomDETInKByte,
- unsigned int RequestHeight256ByteLuma,
- unsigned int RequestHeight256ByteChroma,
- enum dml2_swizzle_mode TilingFormat,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- double BytePerPixelDETY,
- double BytePerPixelDETC,
- enum dml2_rotation_angle RotationAngle,
-
- // Output
- enum dml2_core_internal_request_type *RequestLuma,
- enum dml2_core_internal_request_type *RequestChroma,
- unsigned int *MaxUncompressedBlockLuma,
- unsigned int *MaxUncompressedBlockChroma,
- unsigned int *MaxCompressedBlockLuma,
- unsigned int *MaxCompressedBlockChroma,
- unsigned int *IndependentBlockLuma,
- unsigned int *IndependentBlockChroma)
-{
- unsigned int DETBufferSizeForDCC = nomDETInKByte * 1024;
-
- unsigned int yuv420;
- unsigned int horz_div_l;
- unsigned int horz_div_c;
- unsigned int vert_div_l;
- unsigned int vert_div_c;
-
- unsigned int swath_buf_size;
- double detile_buf_vp_horz_limit;
- double detile_buf_vp_vert_limit;
-
- yuv420 = dml2_core_shared_is_420(SourcePixelFormat);
- horz_div_l = 1;
- horz_div_c = 1;
- vert_div_l = 1;
- vert_div_c = 1;
-
- if (BytePerPixelY == 1)
- vert_div_l = 0;
- if (BytePerPixelC == 1)
- vert_div_c = 0;
-
- if (BytePerPixelC == 0) {
- swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 256;
- detile_buf_vp_horz_limit = (double)swath_buf_size / ((double)RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l));
- detile_buf_vp_vert_limit = (double)swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
- } else {
- swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 2 * 256;
- detile_buf_vp_horz_limit = (double)swath_buf_size / ((double)RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l) + (double)RequestHeight256ByteChroma * BytePerPixelC / (1 + horz_div_c) / (1 + yuv420));
- detile_buf_vp_vert_limit = (double)swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l) + 256.0 / RequestHeight256ByteChroma / (1 + vert_div_c) / (1 + yuv420));
- }
-
- if (SourcePixelFormat == dml2_420_10) {
- detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
- detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
- }
-
- detile_buf_vp_horz_limit = math_floor2(detile_buf_vp_horz_limit - 1, 16);
- detile_buf_vp_vert_limit = math_floor2(detile_buf_vp_vert_limit - 1, 16);
-
- unsigned int MAS_vp_horz_limit;
- unsigned int MAS_vp_vert_limit;
- unsigned int max_vp_horz_width;
- unsigned int max_vp_vert_height;
- unsigned int eff_surf_width_l;
- unsigned int eff_surf_width_c;
- unsigned int eff_surf_height_l;
- unsigned int eff_surf_height_c;
-
- unsigned int full_swath_bytes_horz_wc_l;
- unsigned int full_swath_bytes_horz_wc_c;
- unsigned int full_swath_bytes_vert_wc_l;
- unsigned int full_swath_bytes_vert_wc_c;
-
- MAS_vp_horz_limit = SourcePixelFormat == dml2_rgbe_alpha ? 3840 : 6144;
- MAS_vp_vert_limit = SourcePixelFormat == dml2_rgbe_alpha ? 3840 : (BytePerPixelY == 8 ? 3072 : 6144);
- max_vp_horz_width = (unsigned int)(math_min2((double)MAS_vp_horz_limit, detile_buf_vp_horz_limit));
- max_vp_vert_height = (unsigned int)(math_min2((double)MAS_vp_vert_limit, detile_buf_vp_vert_limit));
- eff_surf_width_l = (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
- eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
- eff_surf_height_l = (SurfaceHeightLuma > max_vp_vert_height ? max_vp_vert_height : SurfaceHeightLuma);
- eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
-
- full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
- full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
- if (BytePerPixelC > 0) {
- full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma * BytePerPixelC;
- full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
- } else {
- full_swath_bytes_horz_wc_c = 0;
- full_swath_bytes_vert_wc_c = 0;
- }
-
- if (SourcePixelFormat == dml2_420_10) {
- full_swath_bytes_horz_wc_l = (unsigned int)(math_ceil2((double)full_swath_bytes_horz_wc_l * 2.0 / 3.0, 256.0));
- full_swath_bytes_horz_wc_c = (unsigned int)(math_ceil2((double)full_swath_bytes_horz_wc_c * 2.0 / 3.0, 256.0));
- full_swath_bytes_vert_wc_l = (unsigned int)(math_ceil2((double)full_swath_bytes_vert_wc_l * 2.0 / 3.0, 256.0));
- full_swath_bytes_vert_wc_c = (unsigned int)(math_ceil2((double)full_swath_bytes_vert_wc_c * 2.0 / 3.0, 256.0));
- }
-
- unsigned int req128_horz_wc_l;
- unsigned int req128_horz_wc_c;
- unsigned int req128_vert_wc_l;
- unsigned int req128_vert_wc_c;
-
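-	// request size selection per scan direction: keep 256-byte requests when two full swaths of each plane fit in the
-	// DET allocation; otherwise switch one or both planes to 128-byte requests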
- if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 0;
- req128_horz_wc_c = 0;
- } else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c && 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 0;
- req128_horz_wc_c = 1;
- } else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c && full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 1;
- req128_horz_wc_c = 0;
- } else {
- req128_horz_wc_l = 1;
- req128_horz_wc_c = 1;
- }
-
- if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 0;
- req128_vert_wc_c = 0;
- } else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c && 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 0;
- req128_vert_wc_c = 1;
- } else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c && full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 1;
- req128_vert_wc_c = 0;
- } else {
- req128_vert_wc_l = 1;
- req128_vert_wc_c = 1;
- }
-
- unsigned int segment_order_horz_contiguous_luma;
- unsigned int segment_order_horz_contiguous_chroma;
- unsigned int segment_order_vert_contiguous_luma;
- unsigned int segment_order_vert_contiguous_chroma;
-
- if (BytePerPixelY == 2) {
- segment_order_horz_contiguous_luma = 0;
- segment_order_vert_contiguous_luma = 1;
- } else {
- segment_order_horz_contiguous_luma = 1;
- segment_order_vert_contiguous_luma = 0;
- }
-
- if (BytePerPixelC == 2) {
- segment_order_horz_contiguous_chroma = 0;
- segment_order_vert_contiguous_chroma = 1;
- } else {
- segment_order_horz_contiguous_chroma = 1;
- segment_order_vert_contiguous_chroma = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
-#endif
- if (DCCProgrammingAssumesScanDirectionUnknown == true) {
- if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0) || (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0) || (req128_vert_wc_c == 1 && segment_order_vert_contiguous_chroma == 0)) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- } else if (!dml_is_vertical_rotation(RotationAngle)) {
- if (req128_horz_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_horz_contiguous_luma == 0) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_horz_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_horz_contiguous_chroma == 0) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- } else {
- if (req128_vert_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_vert_contiguous_luma == 0) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_vert_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_vert_contiguous_chroma == 0) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- }
-
- if (*RequestLuma == dml2_core_internal_request_type_256_bytes) {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 256;
- *IndependentBlockLuma = 0;
- } else if (*RequestLuma == dml2_core_internal_request_type_128_bytes_contiguous) {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 128;
- *IndependentBlockLuma = 128;
- } else {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 64;
- *IndependentBlockLuma = 64;
- }
-
- if (*RequestChroma == dml2_core_internal_request_type_256_bytes) {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 256;
- *IndependentBlockChroma = 0;
- } else if (*RequestChroma == dml2_core_internal_request_type_128_bytes_contiguous) {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 128;
- *IndependentBlockChroma = 128;
- } else {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 64;
- *IndependentBlockChroma = 64;
- }
-
- if (DCCEnabled != true || BytePerPixelC == 0) {
- *MaxUncompressedBlockChroma = 0;
- *MaxCompressedBlockChroma = 0;
- *IndependentBlockChroma = 0;
- }
-
- if (DCCEnabled != true) {
- *MaxUncompressedBlockLuma = 0;
- *MaxCompressedBlockLuma = 0;
- *IndependentBlockLuma = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
-#endif
-
-}
-
-static void calculate_mcache_row_bytes(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_mcache_row_bytes_params *p)
-{
- unsigned int vmpg_bytes = 0;
- unsigned int blk_bytes = 0;
- float meta_per_mvmpg_per_channel = 0;
- unsigned int est_blk_per_vmpg = 2;
- unsigned int mvmpg_per_row_ub = 0;
- unsigned int full_vp_width_mvmpg_aligned = 0;
- unsigned int full_vp_height_mvmpg_aligned = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
-
- *p->mvmpg_width = 0;
- *p->mvmpg_height = 0;
-
- if (p->full_vp_height == 0 && p->full_vp_width == 0) {
- *p->num_mcaches = 0;
- *p->mcache_row_bytes = 0;
- } else {
- blk_bytes = dml_get_tile_block_size_bytes(p->tiling_mode);
-
-		// if gpuvm is not enabled, the alignment boundary should be in terms of the tiling block size
- vmpg_bytes = p->gpuvm_page_size_kbytes * 1024;
-
- //With vmpg_bytes >= tile blk_bytes, the meta_row_width alignment equations are relative to the vmpg_width/height.
- // But for 4KB page with 64KB tile block, we need the meta for all pages in the tile block.
- // Therefore, the alignment is relative to the blk_width/height. The factor of 16 vmpg per 64KB tile block is applied at the end.
- *p->mvmpg_width = p->blk_width;
- *p->mvmpg_height = p->blk_height;
- if (p->gpuvm_enable) {
- if (vmpg_bytes >= blk_bytes) {
- *p->mvmpg_width = p->vmpg_width;
- *p->mvmpg_height = p->vmpg_height;
- } else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
- }
- }
-
- //For plane0 & 1, first calculate full_vp_width/height_l/c aligned to vmpg_width/height_l/c
- full_vp_width_mvmpg_aligned = (unsigned int)(math_floor2((p->vp_start_x + p->full_vp_width) + *p->mvmpg_width - 1, *p->mvmpg_width) - math_floor2(p->vp_start_x, *p->mvmpg_width));
- full_vp_height_mvmpg_aligned = (unsigned int)(math_floor2((p->vp_start_y + p->full_vp_height) + *p->mvmpg_height - 1, *p->mvmpg_height) - math_floor2(p->vp_start_y, *p->mvmpg_height));
-
- *p->full_vp_access_width_mvmpg_aligned = p->surf_vert ? full_vp_height_mvmpg_aligned : full_vp_width_mvmpg_aligned;
-
- //Use the equation for the exact alignment when possible. Note that the exact alignment cannot be used for horizontal access if vmpg_bytes > blk_bytes.
- if (!p->surf_vert) { //horizontal access
- if (p->vp_stationary == 1 && vmpg_bytes <= blk_bytes)
- *p->meta_row_width_ub = full_vp_width_mvmpg_aligned;
- else
- *p->meta_row_width_ub = (unsigned int)math_ceil2((double)p->full_vp_width - 1, *p->mvmpg_width) + *p->mvmpg_width;
- mvmpg_per_row_ub = *p->meta_row_width_ub / *p->mvmpg_width;
- } else { //vertical access
- if (p->vp_stationary == 1)
- *p->meta_row_width_ub = full_vp_height_mvmpg_aligned;
- else
- *p->meta_row_width_ub = (unsigned int)math_ceil2((double)p->full_vp_height - 1, *p->mvmpg_height) + *p->mvmpg_height;
- mvmpg_per_row_ub = *p->meta_row_width_ub / *p->mvmpg_height;
- }
-
- unsigned int meta_per_mvmpg_per_channel_ub = 0;
-
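-		// DCC DRAM bandwidth overhead: at least 1/256 (one meta byte per 256 data bytes), or more when the
-		// per-channel meta footprint rounds up to whole memory words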
- if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
-
-			//but use est_blk_per_vmpg (between 2 and 4) so the estimate is not as pessimistic
- if (p->surf_vert && vmpg_bytes > blk_bytes) {
- meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / 256 / p->num_chans;
- }
-
- *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
- } else {
- meta_per_mvmpg_per_channel = (float)blk_bytes / (float)256 / p->num_chans;
-
- if (!p->surf_vert)
- *p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
- else
- *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel));
- }
-
- meta_per_mvmpg_per_channel_ub = (unsigned int)math_ceil2((double)meta_per_mvmpg_per_channel, p->mcache_line_size_bytes);
-
- //but for 4KB vmpg with 64KB tile blk
- if (p->gpuvm_enable && (blk_bytes == 65536) && (vmpg_bytes == 4096))
- meta_per_mvmpg_per_channel_ub = 16 * meta_per_mvmpg_per_channel_ub;
-
- // If this mcache_row_bytes for the full viewport of the surface is less than or equal to mcache_bytes,
- // then one mcache can be used for this request stream. If not, it is useful to know the width of the viewport that can be supported in the mcache_bytes.
- if (p->gpuvm_enable || !p->surf_vert) {
- *p->mcache_row_bytes = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
-		} else { // vertical access with gpuvm disabled
- *p->mcache_row_bytes = *p->meta_row_width_ub * p->blk_height * p->bytes_per_pixel / 256;
- *p->mcache_row_bytes = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
- }
-
- *p->dcc_dram_bw_pref_overhead_factor = 1 + math_max2(1.0 / 256.0, *p->mcache_row_bytes / p->full_swath_bytes); // dcc_dr_oh_pref
- *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->mcache_size_bytes, 1);
-
- unsigned int mvmpg_per_mcache = p->mcache_size_bytes / meta_per_mvmpg_per_channel_ub;
- *p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
-
- DML2_ASSERT(*p->num_mcaches > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
-#endif
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
-#endif
-}
-
-static void calculate_mcache_setting(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_mcache_setting_params *p)
-{
- unsigned int n;
-
- struct dml2_core_shared_calculate_mcache_setting_locals *l = &scratch->calculate_mcache_setting_locals;
- memset(l, 0, sizeof(struct dml2_core_shared_calculate_mcache_setting_locals));
-
- *p->num_mcaches_l = 0;
- *p->mcache_row_bytes_l = 0;
- *p->dcc_dram_bw_nom_overhead_factor_l = 1.0;
- *p->dcc_dram_bw_pref_overhead_factor_l = 1.0;
-
- *p->num_mcaches_c = 0;
- *p->mcache_row_bytes_c = 0;
- *p->dcc_dram_bw_nom_overhead_factor_c = 1.0;
- *p->dcc_dram_bw_pref_overhead_factor_c = 1.0;
-
- *p->mall_comb_mcache_l = 0;
- *p->mall_comb_mcache_c = 0;
- *p->lc_comb_mcache = 0;
-
- if (!p->dcc_enable)
- return;
-
- l->is_dual_plane = dml2_core_shared_is_420(p->source_format) || p->source_format == dml2_rgbe_alpha;
-
- l->l_p.num_chans = p->num_chans;
- l->l_p.mem_word_bytes = p->mem_word_bytes;
- l->l_p.mcache_size_bytes = p->mcache_size_bytes;
- l->l_p.mcache_line_size_bytes = p->mcache_line_size_bytes;
- l->l_p.gpuvm_enable = p->gpuvm_enable;
- l->l_p.gpuvm_page_size_kbytes = p->gpuvm_page_size_kbytes;
- l->l_p.surf_vert = p->surf_vert;
- l->l_p.vp_stationary = p->vp_stationary;
- l->l_p.tiling_mode = p->tiling_mode;
- l->l_p.vp_start_x = p->vp_start_x_l;
- l->l_p.vp_start_y = p->vp_start_y_l;
- l->l_p.full_vp_width = p->full_vp_width_l;
- l->l_p.full_vp_height = p->full_vp_height_l;
- l->l_p.blk_width = p->blk_width_l;
- l->l_p.blk_height = p->blk_height_l;
- l->l_p.vmpg_width = p->vmpg_width_l;
- l->l_p.vmpg_height = p->vmpg_height_l;
- l->l_p.full_swath_bytes = p->full_swath_bytes_l;
- l->l_p.bytes_per_pixel = p->bytes_per_pixel_l;
-
- // output
- l->l_p.num_mcaches = p->num_mcaches_l;
- l->l_p.mcache_row_bytes = p->mcache_row_bytes_l;
- l->l_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_l;
- l->l_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_l;
- l->l_p.mvmpg_width = &l->mvmpg_width_l;
- l->l_p.mvmpg_height = &l->mvmpg_height_l;
- l->l_p.full_vp_access_width_mvmpg_aligned = &l->full_vp_access_width_mvmpg_aligned_l;
- l->l_p.meta_row_width_ub = &l->meta_row_width_l;
- l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
-
- calculate_mcache_row_bytes(scratch, &l->l_p);
- dml2_assert(*p->num_mcaches_l > 0);
-
- if (l->is_dual_plane) {
- l->c_p.num_chans = p->num_chans;
- l->c_p.mem_word_bytes = p->mem_word_bytes;
- l->c_p.mcache_size_bytes = p->mcache_size_bytes;
- l->c_p.mcache_line_size_bytes = p->mcache_line_size_bytes;
- l->c_p.gpuvm_enable = p->gpuvm_enable;
- l->c_p.gpuvm_page_size_kbytes = p->gpuvm_page_size_kbytes;
- l->c_p.surf_vert = p->surf_vert;
- l->c_p.vp_stationary = p->vp_stationary;
- l->c_p.tiling_mode = p->tiling_mode;
- l->c_p.vp_start_x = p->vp_start_x_c;
- l->c_p.vp_start_y = p->vp_start_y_c;
- l->c_p.full_vp_width = p->full_vp_width_c;
- l->c_p.full_vp_height = p->full_vp_height_c;
- l->c_p.blk_width = p->blk_width_c;
- l->c_p.blk_height = p->blk_height_c;
- l->c_p.vmpg_width = p->vmpg_width_c;
- l->c_p.vmpg_height = p->vmpg_height_c;
- l->c_p.full_swath_bytes = p->full_swath_bytes_c;
- l->c_p.bytes_per_pixel = p->bytes_per_pixel_c;
-
- // output
- l->c_p.num_mcaches = p->num_mcaches_c;
- l->c_p.mcache_row_bytes = p->mcache_row_bytes_c;
- l->c_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_c;
- l->c_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_c;
- l->c_p.mvmpg_width = &l->mvmpg_width_c;
- l->c_p.mvmpg_height = &l->mvmpg_height_c;
- l->c_p.full_vp_access_width_mvmpg_aligned = &l->full_vp_access_width_mvmpg_aligned_c;
- l->c_p.meta_row_width_ub = &l->meta_row_width_c;
- l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
-
- calculate_mcache_row_bytes(scratch, &l->c_p);
- dml2_assert(*p->num_mcaches_c > 0);
- }
-
- // Sharing for iMALL access
- l->mcache_remainder_l = *p->mcache_row_bytes_l % p->mcache_size_bytes;
- l->mcache_remainder_c = *p->mcache_row_bytes_c % p->mcache_size_bytes;
- l->mvmpg_access_width_l = p->surf_vert ? l->mvmpg_height_l : l->mvmpg_width_l;
- l->mvmpg_access_width_c = p->surf_vert ? l->mvmpg_height_c : l->mvmpg_width_c;
-
- if (p->imall_enable) {
- *p->mall_comb_mcache_l = (2 * l->mcache_remainder_l <= p->mcache_size_bytes);
-
- if (l->is_dual_plane)
- *p->mall_comb_mcache_c = (2 * l->mcache_remainder_c <= p->mcache_size_bytes);
- }
-
-	if (!p->surf_vert) // horizontal access
- l->luma_time_factor = (double)l->mvmpg_height_c / l->mvmpg_height_l * 2;
- else // vertical access
- l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2;
-
-	// The algorithm starts by computing a non-integer avg_mcache_element_size_l/c:
- l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
- if (l->is_dual_plane) {
- l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
-
- if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
- l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
- (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
- }
- *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
-
- if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
- }
-#endif
- // calculate split_coordinate
- l->full_vp_access_width_l = p->surf_vert ? p->full_vp_height_l : p->full_vp_width_l;
- l->full_vp_access_width_c = p->surf_vert ? p->full_vp_height_c : p->full_vp_width_c;
-
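-	// mcache boundaries: split the meta row evenly across mcaches, snapped down to mvmpg access-width granularity;
-	// the last entry is the full viewport access width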
- for (n = 0; n < *p->num_mcaches_l - 1; n++) {
- p->mcache_offsets_l[n] = (unsigned int)(math_floor2((n + 1) * l->avg_mcache_element_size_l / l->mvmpg_access_width_l, 1)) * l->mvmpg_access_width_l;
- }
- p->mcache_offsets_l[*p->num_mcaches_l - 1] = l->full_vp_access_width_l;
-
- if (l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_c - 1; n++) {
- p->mcache_offsets_c[n] = (unsigned int)(math_floor2((n + 1) * l->avg_mcache_element_size_c / l->mvmpg_access_width_c, 1)) * l->mvmpg_access_width_c;
- }
- p->mcache_offsets_c[*p->num_mcaches_c - 1] = l->full_vp_access_width_c;
- }
-#ifdef __DML_VBA_DEBUG__
- for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
-
- if (l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
- }
-#endif
-
- // Luma/Chroma combine in the last mcache
-	// In the case of Luma/Chroma combine-mCache (with lc_comb_mcache==1), all mCaches except the last segment are filled as much as possible while staying aligned to the mvmpg boundary
- if (*p->lc_comb_mcache && l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_l - 1; n++)
- p->mcache_offsets_l[n] = (n + 1) * l->mvmpg_per_mcache_lb_l * l->mvmpg_access_width_l;
- p->mcache_offsets_l[*p->num_mcaches_l - 1] = l->full_vp_access_width_l;
-
- for (n = 0; n < *p->num_mcaches_c - 1; n++)
- p->mcache_offsets_c[n] = (n + 1) * l->mvmpg_per_mcache_lb_c * l->mvmpg_access_width_c;
- p->mcache_offsets_c[*p->num_mcaches_c - 1] = l->full_vp_access_width_c;
-
-#ifdef __DML_VBA_DEBUG__
- for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
-
- for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
-#endif
- }
-
- *p->mcache_shift_granularity_l = l->mvmpg_access_width_l;
- *p->mcache_shift_granularity_c = l->mvmpg_access_width_c;
-}
-
-static void calculate_mall_bw_overhead_factor(
- double mall_prefetch_sdp_overhead_factor[], //mall_sdp_oh_nom/pref
- double mall_prefetch_dram_overhead_factor[], //mall_dram_oh_nom/pref
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes)
-{
- for (unsigned int k = 0; k < num_active_planes; ++k) {
- mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mall_prefetch_dram_overhead_factor[k] = 1.0;
-
- // SDP - on the return side
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall) // always no data return
- mall_prefetch_sdp_overhead_factor[k] = 1.25;
- else if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- mall_prefetch_sdp_overhead_factor[k] = 0.25;
-
- // DRAM
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall)
- mall_prefetch_dram_overhead_factor[k] = 2.0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
-#endif
- }
-}
-
-static double dml_get_return_bandwidth_available(
- const struct dml2_soc_bb *soc,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool is_avg_bw,
- bool is_hvm_en,
- bool is_hvm_only,
- double dcflk_mhz,
- double fclk_mhz,
- double dram_bw_mbps)
-{
- double return_bw_mbps = 0.;
- double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcflk_mhz;
- double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
- double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
-
- double derate_sdp_factor = 1;
- double derate_fabric_factor = 1;
- double derate_dram_factor = 1;
-
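-	// pick derate percentages from the SoC QoS table based on average vs urgent traffic, the SoC state
-	// (MALL/SVP prefetch vs system active), and whether host VM translations share the DRAM path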
- if (is_avg_bw) {
- if (state_type == dml2_core_internal_soc_state_svp_prefetch) {
- derate_sdp_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100.0;
- } else { // just assume sys_active
- derate_sdp_factor = soc->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100.0;
- }
- } else { // urgent bw
- if (state_type == dml2_core_internal_soc_state_svp_prefetch) {
- derate_sdp_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100.0;
-
- if (is_hvm_en) {
- if (is_hvm_only)
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_vm / 100.0;
- else
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel_and_vm / 100.0;
- } else {
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100.0;
- }
- } else { // just assume sys_active
- derate_sdp_factor = soc->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100.0;
-
- if (is_hvm_en) {
- if (is_hvm_only)
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_vm / 100.0;
- else
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100.0;
- } else {
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100.0;
- }
- }
- }
-
- double derate_sdp_bandwidth = ideal_sdp_bandwidth * derate_sdp_factor;
- double derate_fabric_bandwidth = ideal_fabric_bandwidth * derate_fabric_factor;
- double derate_dram_bandwidth = ideal_dram_bandwidth * derate_dram_factor;
-
- if (bw_type == dml2_core_internal_bw_sdp)
- return_bw_mbps = math_min2(derate_sdp_bandwidth, derate_fabric_bandwidth);
- else // dml2_core_internal_bw_dram
- return_bw_mbps = derate_dram_bandwidth;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcflk_mhz = %f\n", __func__, dcflk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
- return return_bw_mbps;
-}
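
The function above derates each ideal bandwidth by a QoS percentage and then, for the SDP return path, takes the minimum of the derated SDP and fabric bandwidths; the DRAM path is the derated DRAM bandwidth alone. A minimal standalone sketch of that arithmetic, using hypothetical clocks and derate percentages rather than values from any real SoC table:

/* Illustrative sketch only; all clock and derate values below are invented. */
#include <stdio.h>

static double min2(double a, double b)
{
	return a < b ? a : b;
}

int main(void)
{
	double dcfclk_mhz = 1200.0, fclk_mhz = 2000.0, dram_bw_mbps = 51200.0;
	double return_bus_width_bytes = 64.0, fabric_bytes_per_fclk = 32.0;
	double sdp_derate = 0.90, fabric_derate = 0.85, dram_derate = 0.75;

	double sdp_bw = dcfclk_mhz * return_bus_width_bytes * sdp_derate;    /* MB/s */
	double fabric_bw = fclk_mhz * fabric_bytes_per_fclk * fabric_derate; /* MB/s */
	double dram_bw = dram_bw_mbps * dram_derate;                         /* MB/s */

	printf("sdp return path  = %.1f MB/s\n", min2(sdp_bw, fabric_bw));
	printf("dram return path = %.1f MB/s\n", dram_bw);
	return 0;
}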
-
-static void calculate_bandwidth_available(
- double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
- double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_max],
- double urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_max],
-
- const struct dml2_soc_bb *soc,
- bool HostVMEnable,
- double dcfclk_mhz,
- double fclk_mhz,
- double dram_bw_mbps)
-{
- unsigned int n, m;
-
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
-
-	// Calculate all the bandwidth available
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) {
- avg_bandwidth_available[m][n] = dml_get_return_bandwidth_available(soc,
- m, // soc_state
- n, // bw_type
- 1, // avg_bw
- HostVMEnable,
- 0, // hvm_only
- dcfclk_mhz,
- fclk_mhz,
- dram_bw_mbps);
-
- urg_bandwidth_available[m][n] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 0, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
-
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
-
- // urg_bandwidth_available_vm_only is indexed by soc_state
- if (n == dml2_core_internal_bw_dram) {
- urg_bandwidth_available_vm_only[m] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 1, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
- urg_bandwidth_available_pixel_and_vm[m] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 0, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
- }
- }
-
- avg_bandwidth_available_min[m] = math_min2(avg_bandwidth_available[m][dml2_core_internal_bw_dram], avg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
- urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
-		dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[m]);
-#endif
- }
-}
-
-static void calculate_avg_bandwidth_required(
- double avg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes,
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double cursor_bw[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double mall_prefetch_dram_overhead_factor[],
- double mall_prefetch_sdp_overhead_factor[])
-{
- unsigned int n, m, k;
-
- // Average BW support check
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) { // sdp, dram
- avg_bandwidth_required[m][n] = 0;
- }
- }
-
- // SysActive and SVP Prefetch AVG bandwidth Check
- for (k = 0; k < num_active_planes; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
-#endif
-
- double sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
- double dram_overhead_factor_p0 = dcc_dram_bw_nom_overhead_factor_p0[k] * mall_prefetch_dram_overhead_factor[k];
- double dram_overhead_factor_p1 = dcc_dram_bw_nom_overhead_factor_p1[k] * mall_prefetch_dram_overhead_factor[k];
-
-		// FIXME_DCN4: cursor_bw was missing here; is it (and tdlut bw) actually needed for the average bandwidth calculation?
-		// active avg bw does not include phantom pipes, but svp_prefetch avg bw should include them
- if (!dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
- avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] += sdp_overhead_factor * (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]) + cursor_bw[k];
- avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
- }
- avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] += sdp_overhead_factor * (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]) + cursor_bw[k];
- avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
-#endif
- }
-}
-
-static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *p)
-{
- struct dml2_core_calcs_CalculateVMRowAndSwath_locals *s = &scratch->CalculateVMRowAndSwath_locals;
-
- s->HostVMDynamicLevels = CalculateHostVMDynamicLevels(p->display_cfg->gpuvm_enable, p->display_cfg->hostvm_enable, p->HostVMMinPageSize, p->display_cfg->hostvm_max_non_cached_page_table_levels);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->hostvm_enable == true) {
- p->vm_group_bytes[k] = 512;
- p->dpte_group_bytes[k] = 512;
- } else if (p->display_cfg->gpuvm_enable == true) {
- p->vm_group_bytes[k] = 2048;
- if (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes >= 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- p->dpte_group_bytes[k] = 512;
- } else {
- p->dpte_group_bytes[k] = 2048;
- }
- } else {
- p->vm_group_bytes[k] = 0;
- p->dpte_group_bytes[k] = 0;
- }
-
- if (dml2_core_shared_is_420(p->myPipe[k].SourcePixelFormat) || p->myPipe[k].SourcePixelFormat == dml2_rgbe_alpha) {
- if ((p->myPipe[k].SourcePixelFormat == dml2_420_10 || p->myPipe[k].SourcePixelFormat == dml2_420_12) && !dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- s->PTEBufferSizeInRequestsForLuma[k] = (p->PTEBufferSizeInRequestsLuma + p->PTEBufferSizeInRequestsChroma) / 2;
- s->PTEBufferSizeInRequestsForChroma[k] = s->PTEBufferSizeInRequestsForLuma[k];
- } else {
- s->PTEBufferSizeInRequestsForLuma[k] = p->PTEBufferSizeInRequestsLuma;
- s->PTEBufferSizeInRequestsForChroma[k] = p->PTEBufferSizeInRequestsChroma;
- }
-
- scratch->calculate_vm_and_row_bytes_params.ViewportStationary = p->myPipe[k].ViewportStationary;
- scratch->calculate_vm_and_row_bytes_params.DCCEnable = p->myPipe[k].DCCEnable;
- scratch->calculate_vm_and_row_bytes_params.NumberOfDPPs = p->myPipe[k].DPPPerSurface;
- scratch->calculate_vm_and_row_bytes_params.BlockHeight256Bytes = p->myPipe[k].BlockHeight256BytesC;
- scratch->calculate_vm_and_row_bytes_params.BlockWidth256Bytes = p->myPipe[k].BlockWidth256BytesC;
- scratch->calculate_vm_and_row_bytes_params.SourcePixelFormat = p->myPipe[k].SourcePixelFormat;
- scratch->calculate_vm_and_row_bytes_params.SurfaceTiling = p->myPipe[k].SurfaceTiling;
- scratch->calculate_vm_and_row_bytes_params.BytePerPixel = p->myPipe[k].BytePerPixelC;
- scratch->calculate_vm_and_row_bytes_params.RotationAngle = p->myPipe[k].RotationAngle;
- scratch->calculate_vm_and_row_bytes_params.SwathWidth = p->SwathWidthC[k];
- scratch->calculate_vm_and_row_bytes_params.ViewportHeight = p->myPipe[k].ViewportHeightC;
- scratch->calculate_vm_and_row_bytes_params.ViewportXStart = p->myPipe[k].ViewportXStartC;
- scratch->calculate_vm_and_row_bytes_params.ViewportYStart = p->myPipe[k].ViewportYStartC;
- scratch->calculate_vm_and_row_bytes_params.GPUVMEnable = p->display_cfg->gpuvm_enable;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMaxPageTableLevels = p->display_cfg->gpuvm_max_page_table_levels;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMinPageSizeKBytes = p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- scratch->calculate_vm_and_row_bytes_params.PTEBufferSizeInRequests = s->PTEBufferSizeInRequestsForChroma[k];
- scratch->calculate_vm_and_row_bytes_params.Pitch = p->myPipe[k].PitchC;
- scratch->calculate_vm_and_row_bytes_params.MacroTileWidth = p->myPipe[k].BlockWidthC;
- scratch->calculate_vm_and_row_bytes_params.MacroTileHeight = p->myPipe[k].BlockHeightC;
- scratch->calculate_vm_and_row_bytes_params.is_phantom = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]);
- scratch->calculate_vm_and_row_bytes_params.DCCMetaPitch = p->myPipe[k].DCCMetaPitchC;
- scratch->calculate_vm_and_row_bytes_params.mrq_present = p->mrq_present;
-
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow = &s->PixelPTEBytesPerRowC[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRowStorage = &s->PixelPTEBytesPerRowStorageC[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub = &p->dpte_row_width_chroma_ub[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height = &p->dpte_row_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_linear = &p->dpte_row_height_linear_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow_one_row_per_frame = &s->PixelPTEBytesPerRowC_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub_one_row_per_frame = &s->dpte_row_width_chroma_ub_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_one_row_per_frame = &s->dpte_row_height_chroma_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_width = &p->vmpg_width_c[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_height = &p->vmpg_height_c[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqWidth = &p->PixelPTEReqWidthC[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqHeight = &p->PixelPTEReqHeightC[k];
- scratch->calculate_vm_and_row_bytes_params.PTERequestSize = &p->PTERequestSizeC[k];
- scratch->calculate_vm_and_row_bytes_params.dpde0_bytes_per_frame_ub = &p->dpde0_bytes_per_frame_ub_c[k];
-
- scratch->calculate_vm_and_row_bytes_params.meta_row_bytes = &s->meta_row_bytes_per_row_ub_c[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestWidth = &p->meta_req_width_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestHeight = &p->meta_req_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_width = &p->meta_row_width_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_height = &p->meta_row_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_pte_bytes_per_frame_ub = &p->meta_pte_bytes_per_frame_ub_c[k];
-
- s->vm_bytes_c = CalculateVMAndRowBytes(&scratch->calculate_vm_and_row_bytes_params);
-
- p->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
- p->myPipe[k].VRatioChroma,
- p->myPipe[k].VTapsChroma,
- p->myPipe[k].InterlaceEnable,
- p->myPipe[k].ProgressiveToInterlaceUnitInOPP,
- p->myPipe[k].SwathHeightC,
- p->myPipe[k].RotationAngle,
- p->myPipe[k].mirrored,
- p->myPipe[k].ViewportStationary,
- p->SwathWidthC[k],
- p->myPipe[k].ViewportHeightC,
- p->myPipe[k].ViewportXStartC,
- p->myPipe[k].ViewportYStartC,
-
- // Output
- &p->VInitPreFillC[k],
- &p->MaxNumSwathC[k]);
- } else {
- s->PTEBufferSizeInRequestsForLuma[k] = p->PTEBufferSizeInRequestsLuma + p->PTEBufferSizeInRequestsChroma;
- s->PTEBufferSizeInRequestsForChroma[k] = 0;
- s->PixelPTEBytesPerRowC[k] = 0;
- s->PixelPTEBytesPerRowStorageC[k] = 0;
- s->vm_bytes_c = 0;
- p->MaxNumSwathC[k] = 0;
- p->PrefetchSourceLinesC[k] = 0;
- s->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- s->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- s->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
- }
-
- scratch->calculate_vm_and_row_bytes_params.ViewportStationary = p->myPipe[k].ViewportStationary;
- scratch->calculate_vm_and_row_bytes_params.DCCEnable = p->myPipe[k].DCCEnable;
- scratch->calculate_vm_and_row_bytes_params.NumberOfDPPs = p->myPipe[k].DPPPerSurface;
- scratch->calculate_vm_and_row_bytes_params.BlockHeight256Bytes = p->myPipe[k].BlockHeight256BytesY;
- scratch->calculate_vm_and_row_bytes_params.BlockWidth256Bytes = p->myPipe[k].BlockWidth256BytesY;
- scratch->calculate_vm_and_row_bytes_params.SourcePixelFormat = p->myPipe[k].SourcePixelFormat;
- scratch->calculate_vm_and_row_bytes_params.SurfaceTiling = p->myPipe[k].SurfaceTiling;
- scratch->calculate_vm_and_row_bytes_params.BytePerPixel = p->myPipe[k].BytePerPixelY;
- scratch->calculate_vm_and_row_bytes_params.RotationAngle = p->myPipe[k].RotationAngle;
- scratch->calculate_vm_and_row_bytes_params.SwathWidth = p->SwathWidthY[k];
- scratch->calculate_vm_and_row_bytes_params.ViewportHeight = p->myPipe[k].ViewportHeight;
- scratch->calculate_vm_and_row_bytes_params.ViewportXStart = p->myPipe[k].ViewportXStart;
- scratch->calculate_vm_and_row_bytes_params.ViewportYStart = p->myPipe[k].ViewportYStart;
- scratch->calculate_vm_and_row_bytes_params.GPUVMEnable = p->display_cfg->gpuvm_enable;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMaxPageTableLevels = p->display_cfg->gpuvm_max_page_table_levels;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMinPageSizeKBytes = p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- scratch->calculate_vm_and_row_bytes_params.PTEBufferSizeInRequests = s->PTEBufferSizeInRequestsForLuma[k];
- scratch->calculate_vm_and_row_bytes_params.Pitch = p->myPipe[k].PitchY;
- scratch->calculate_vm_and_row_bytes_params.MacroTileWidth = p->myPipe[k].BlockWidthY;
- scratch->calculate_vm_and_row_bytes_params.MacroTileHeight = p->myPipe[k].BlockHeightY;
- scratch->calculate_vm_and_row_bytes_params.is_phantom = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]);
- scratch->calculate_vm_and_row_bytes_params.DCCMetaPitch = p->myPipe[k].DCCMetaPitchY;
- scratch->calculate_vm_and_row_bytes_params.mrq_present = p->mrq_present;
-
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow = &s->PixelPTEBytesPerRowY[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRowStorage = &s->PixelPTEBytesPerRowStorageY[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub = &p->dpte_row_width_luma_ub[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height = &p->dpte_row_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_linear = &p->dpte_row_height_linear_luma[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow_one_row_per_frame = &s->PixelPTEBytesPerRowY_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub_one_row_per_frame = &s->dpte_row_width_luma_ub_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_one_row_per_frame = &s->dpte_row_height_luma_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_width = &p->vmpg_width_y[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_height = &p->vmpg_height_y[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqWidth = &p->PixelPTEReqWidthY[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqHeight = &p->PixelPTEReqHeightY[k];
- scratch->calculate_vm_and_row_bytes_params.PTERequestSize = &p->PTERequestSizeY[k];
- scratch->calculate_vm_and_row_bytes_params.dpde0_bytes_per_frame_ub = &p->dpde0_bytes_per_frame_ub_l[k];
-
- scratch->calculate_vm_and_row_bytes_params.meta_row_bytes = &s->meta_row_bytes_per_row_ub_l[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestWidth = &p->meta_req_width_luma[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestHeight = &p->meta_req_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_width = &p->meta_row_width_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_height = &p->meta_row_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_pte_bytes_per_frame_ub = &p->meta_pte_bytes_per_frame_ub_l[k];
-
- s->vm_bytes_l = CalculateVMAndRowBytes(&scratch->calculate_vm_and_row_bytes_params);
-
- p->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
- p->myPipe[k].VRatio,
- p->myPipe[k].VTaps,
- p->myPipe[k].InterlaceEnable,
- p->myPipe[k].ProgressiveToInterlaceUnitInOPP,
- p->myPipe[k].SwathHeightY,
- p->myPipe[k].RotationAngle,
- p->myPipe[k].mirrored,
- p->myPipe[k].ViewportStationary,
- p->SwathWidthY[k],
- p->myPipe[k].ViewportHeight,
- p->myPipe[k].ViewportXStart,
- p->myPipe[k].ViewportYStart,
-
- // Output
- &p->VInitPreFillY[k],
- &p->MaxNumSwathY[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
-#endif
- p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
- p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
-#endif
- if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
- p->PTEBufferSizeNotExceeded[k] = true;
- } else {
- p->PTEBufferSizeNotExceeded[k] = false;
- }
-
- s->one_row_per_frame_fits_in_buffer[k] = (s->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForLuma[k] &&
- s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
-#ifdef __DML_VBA_DEBUG__
- if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
-
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
- }
-#endif
- }
-
- CalculateMALLUseForStaticScreen(
- p->display_cfg,
- p->NumberOfActiveSurfaces,
- p->MALLAllocatedForDCN,
- p->SurfaceSizeInMALL,
- s->one_row_per_frame_fits_in_buffer,
- // Output
- p->is_using_mall_for_ss);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->gpuvm_enable) {
-			if (p->display_cfg->plane_descriptors[k].overrides.hw.force_pte_buffer_mode.enable == 1) {
-				p->PTE_BUFFER_MODE[k] = p->display_cfg->plane_descriptors[k].overrides.hw.force_pte_buffer_mode.value;
-			} else {
-				p->PTE_BUFFER_MODE[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
-					dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64);
-			}
- p->BIGK_FRAGMENT_SIZE[k] = (unsigned int)(math_log((float)p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes * 1024, 2) - 12);
- } else {
- p->PTE_BUFFER_MODE[k] = 0;
- p->BIGK_FRAGMENT_SIZE[k] = 0;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DCCMetaBufferSizeNotExceeded[k] = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
-#endif
- p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
- (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
-
- p->use_one_row_for_frame_flip[k] = p->use_one_row_for_frame[k] && !(p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame);
-
- if (p->use_one_row_for_frame[k]) {
- p->dpte_row_height_luma[k] = s->dpte_row_height_luma_one_row_per_frame[k];
- p->dpte_row_width_luma_ub[k] = s->dpte_row_width_luma_ub_one_row_per_frame[k];
- s->PixelPTEBytesPerRowY[k] = s->PixelPTEBytesPerRowY_one_row_per_frame[k];
- p->dpte_row_height_chroma[k] = s->dpte_row_height_chroma_one_row_per_frame[k];
- p->dpte_row_width_chroma_ub[k] = s->dpte_row_width_chroma_ub_one_row_per_frame[k];
- s->PixelPTEBytesPerRowC[k] = s->PixelPTEBytesPerRowC_one_row_per_frame[k];
- p->PTEBufferSizeNotExceeded[k] = s->one_row_per_frame_fits_in_buffer[k];
- }
-
- if (p->meta_row_bytes[k] <= p->DCCMetaBufferSizeBytes) {
- p->DCCMetaBufferSizeNotExceeded[k] = true;
- } else {
- p->DCCMetaBufferSizeNotExceeded[k] = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
-#endif
- }
-
- s->PixelPTEBytesPerRowY[k] = s->PixelPTEBytesPerRowY[k] * (1 + 8 * s->HostVMDynamicLevels);
- s->PixelPTEBytesPerRowC[k] = s->PixelPTEBytesPerRowC[k] * (1 + 8 * s->HostVMDynamicLevels);
- p->PixelPTEBytesPerRow[k] = s->PixelPTEBytesPerRowY[k] + s->PixelPTEBytesPerRowC[k];
-
-		// if one row of dPTEs is meant to span the entire frame, then for these calculations, we will pretend that one big row is fetched in two halves
- if (p->use_one_row_for_frame[k])
- p->PixelPTEBytesPerRow[k] = p->PixelPTEBytesPerRow[k] / 2;
-
- CalculateRowBandwidth(
- p->display_cfg->gpuvm_enable,
- p->use_one_row_for_frame[k],
- p->myPipe[k].SourcePixelFormat,
- p->myPipe[k].VRatio,
- p->myPipe[k].VRatioChroma,
- p->myPipe[k].DCCEnable,
- p->myPipe[k].HTotal / p->myPipe[k].PixelClock,
- s->PixelPTEBytesPerRowY[k],
- s->PixelPTEBytesPerRowC[k],
- p->dpte_row_height_luma[k],
- p->dpte_row_height_chroma[k],
-
- p->mrq_present,
- s->meta_row_bytes_per_row_ub_l[k],
- s->meta_row_bytes_per_row_ub_c[k],
- p->meta_row_height_luma[k],
- p->meta_row_height_chroma[k],
-
- // Output
- &p->dpte_row_bw[k],
- &p->meta_row_bw[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
-#endif
- }
-}
-
-static double CalculateUrgentLatency(
- double UrgentLatencyPixelDataOnly,
- double UrgentLatencyPixelMixedWithVMData,
- double UrgentLatencyVMDataOnly,
- bool DoUrgentLatencyAdjustment,
- double UrgentLatencyAdjustmentFabricClockComponent,
- double UrgentLatencyAdjustmentFabricClockReference,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int urgent_ramp_uclk_cycles,
- unsigned int df_qos_response_time_fclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_urgent_ramp_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4x) {
- urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
- + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
- + urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
- } else {
- urgent_latency = math_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
- if (DoUrgentLatencyAdjustment == true) {
- urgent_latency = urgent_latency + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
- }
- }
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
- } else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
- }
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
-#endif
- return urgent_latency;
-}
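
For the dcn4x QoS type, the urgent latency is the sum of three cycle counts converted to microseconds at their respective clocks, with the fabric round-trip and UMC ramp terms inflated by their latency margins. A short unit-check sketch with made-up cycle counts and clocks (not real QoS table entries):

/* Hypothetical numbers, only to show how cycles / MHz yields microseconds. */
#include <stdio.h>

int main(void)
{
	double fclk_mhz = 2000.0, uclk_mhz = 1000.0;
	double df_qos_fclk_cycles = 50.0, mall_oh_fclk_cycles = 50.0;
	double round_trip_fclk_cycles = 800.0, urgent_ramp_uclk_cycles = 400.0;
	double fabric_margin_pct = 10.0, umc_margin_pct = 20.0;

	double urgent_us =
		(df_qos_fclk_cycles + mall_oh_fclk_cycles) / fclk_mhz +
		round_trip_fclk_cycles / fclk_mhz * (1.0 + fabric_margin_pct / 100.0) +
		urgent_ramp_uclk_cycles / uclk_mhz * (1.0 + umc_margin_pct / 100.0);

	printf("urgent latency = %.3f us\n", urgent_us); /* 0.05 + 0.44 + 0.48 = 0.97 us */
	return 0;
}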
-
-static double CalculateTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int trip_to_memory_uclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4x) {
- trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
- + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
- + trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
- } else {
- trip_to_memory_us = UrgLatency;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
- } else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
- }
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
-#endif
-
- return trip_to_memory_us;
-}
-
-static double CalculateMetaTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int meta_trip_to_memory_uclk_cycles,
- unsigned int meta_trip_to_memory_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double meta_trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4x) {
- meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
- + meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
- } else {
- meta_trip_to_memory_us = UrgLatency;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- } else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
- }
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
-#endif
-
- return meta_trip_to_memory_us;
-}
-
-static void calculate_cursor_req_attributes(
- unsigned int cursor_width,
- unsigned int cursor_bpp,
-
- // output
- unsigned int *cursor_lines_per_chunk,
- unsigned int *cursor_bytes_per_line,
- unsigned int *cursor_bytes_per_chunk,
- unsigned int *cursor_bytes)
-{
- unsigned int cursor_pitch = 0;
- unsigned int cursor_bytes_per_req = 0;
- unsigned int cursor_width_bytes = 0;
- unsigned int cursor_height = 0;
-
- //SW determines the cursor pitch to support the maximum cursor_width that will be used but the following restrictions apply.
- //- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
- //- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
-
- //The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
-
- cursor_width_bytes = (unsigned int)math_ceil2((double)cursor_width * cursor_bpp / 8, 1);
- if (cursor_width_bytes <= 64)
- cursor_bytes_per_req = 64;
- else if (cursor_width_bytes <= 128)
- cursor_bytes_per_req = 128;
- else
- cursor_bytes_per_req = 256;
-
- //If cursor_width_bytes is greater than 256B, then multiple 256B requests are issued to fetch the entire cursor line.
- *cursor_bytes_per_line = (unsigned int)math_ceil2((double)cursor_width_bytes, cursor_bytes_per_req);
-
- //Nominally, the cursor chunk is 1KB or 2KB but it is restricted to a power of 2 number of lines with a maximum of 16 lines.
- if (cursor_bpp == 2) {
- *cursor_lines_per_chunk = 16;
- } else if (cursor_bpp == 32) {
- if (cursor_width <= 32)
- *cursor_lines_per_chunk = 16;
- else if (cursor_width <= 64)
- *cursor_lines_per_chunk = 8;
- else if (cursor_width <= 128)
- *cursor_lines_per_chunk = 4;
- else
- *cursor_lines_per_chunk = 2;
- } else if (cursor_bpp == 64) {
- if (cursor_width <= 16)
- *cursor_lines_per_chunk = 16;
- else if (cursor_width <= 32)
- *cursor_lines_per_chunk = 8;
- else if (cursor_width <= 64)
- *cursor_lines_per_chunk = 4;
- else if (cursor_width <= 128)
- *cursor_lines_per_chunk = 2;
- else
- *cursor_lines_per_chunk = 1;
- } else {
- if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_assert(0);
- }
- }
-
- *cursor_bytes_per_chunk = *cursor_bytes_per_line * *cursor_lines_per_chunk;
-
- // For the cursor implementation, all requested data is stored in the return buffer. Given this fact, the cursor_bytes can be directly compared with the CursorBufferSize.
- // Only cursor_width is provided for worst case sizing so assume that the cursor is square
- cursor_height = cursor_width;
- *cursor_bytes = *cursor_bytes_per_line * cursor_height;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
-#endif
-}
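
Worked example of the sizing rules above, with assumed inputs (illustrative only, not taken from hardware documentation beyond what the code itself states): a 64-pixel-wide, 32 bpp cursor has cursor_width_bytes = 64 * 32 / 8 = 256, so it uses one 256B request per line and cursor_bytes_per_line = 256; at 32 bpp and width <= 64 there are 8 lines per chunk, so cursor_bytes_per_chunk = 2048; taking the square worst case, cursor_bytes = 256 * 64 = 16384 bytes.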
-
-static void calculate_cursor_urgent_burst_factor(
- unsigned int CursorBufferSize,
- unsigned int CursorWidth,
- unsigned int cursor_bytes_per_chunk,
- unsigned int cursor_lines_per_chunk,
- double LineTime,
- double UrgentLatency,
-
- double *UrgentBurstFactorCursor,
- bool *NotEnoughUrgentLatencyHiding)
-{
- unsigned int LinesInCursorBuffer = 0;
- double CursorBufferSizeInTime = 0;
-
- if (CursorWidth > 0) {
- LinesInCursorBuffer = (unsigned int)math_floor2(CursorBufferSize * 1024.0 / (double)cursor_bytes_per_chunk, 1) * cursor_lines_per_chunk;
-
- CursorBufferSizeInTime = LinesInCursorBuffer * LineTime;
- if (CursorBufferSizeInTime - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorCursor = 0;
- } else {
- *NotEnoughUrgentLatencyHiding = 0;
- *UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
-#endif
-
- }
-}
-
-static void CalculateUrgentBurstFactor(
- const struct dml2_plane_parameters *plane_cfg,
- unsigned int swath_width_luma_ub,
- unsigned int swath_width_chroma_ub,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double LineTime,
- double UrgentLatency,
- double VRatio,
- double VRatioC,
- double BytePerPixelInDETY,
- double BytePerPixelInDETC,
- unsigned int DETBufferSizeY,
- unsigned int DETBufferSizeC,
- // Output
- double *UrgentBurstFactorLuma,
- double *UrgentBurstFactorChroma,
- bool *NotEnoughUrgentLatencyHiding)
-{
- double LinesInDETLuma;
- double LinesInDETChroma;
- double DETBufferSizeInTimeLuma;
- double DETBufferSizeInTimeChroma;
-
- *NotEnoughUrgentLatencyHiding = 0;
- *UrgentBurstFactorLuma = 0;
- *UrgentBurstFactorChroma = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
-#endif
- DML2_ASSERT(VRatio > 0);
-
- LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
-
- DETBufferSizeInTimeLuma = math_floor2(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
- if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorLuma = 0;
- } else {
- *UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
- }
-
- if (BytePerPixelInDETC > 0) {
- LinesInDETChroma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeC) / BytePerPixelInDETC / swath_width_chroma_ub;
-
- DETBufferSizeInTimeChroma = math_floor2(LinesInDETChroma, SwathHeightC) * LineTime / VRatioC;
- if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorChroma = 0;
- } else {
- *UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
-#endif
-
-}
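
Both the cursor and plane burst factors follow the same pattern: factor = T_buffer / (T_buffer - T_urgent), where T_buffer is how long the buffered data lasts at the display consumption rate. With illustrative numbers, a DET that hides 20 us of consumption against a 4 us urgent latency gives 20 / (20 - 4) = 1.25, meaning fetches must run 25% above the average rate to keep the buffer from underflowing; if T_buffer <= T_urgent, the factor is forced to 0 and NotEnoughUrgentLatencyHiding is flagged instead.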
-
-static void CalculateDCFCLKDeepSleep(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelY[],
- unsigned int BytePerPixelC[],
- unsigned int SwathWidthY[],
- unsigned int SwathWidthC[],
- unsigned int DPPPerSurface[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- unsigned int ReturnBusWidth,
-
- // Output
- double *DCFClkDeepSleep)
-{
- double DisplayPipeLineDeliveryTimeLuma;
- double DisplayPipeLineDeliveryTimeChroma;
- double DCFClkDeepSleepPerSurface[DML2_MAX_PLANES];
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_rate_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerSurface[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_rate_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChroma = 0;
- } else {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerSurface[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_rate_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-
- if (BytePerPixelC[k] > 0) {
- DCFClkDeepSleepPerSurface[k] = math_max2(__DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma,
- __DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
- } else {
- DCFClkDeepSleepPerSurface[k] = __DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
- }
- DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
-#endif
- }
-
- double ReadBandwidth = 0.0;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
- }
-
- *DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- *DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
- }
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
-}
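
As a rough numerical check with assumed values (the real __DML2_CALCS_DCFCLK_FACTOR__ is defined elsewhere; 1.15 is used here purely for illustration): a total read bandwidth of 4000 MB/s over a 64-byte return bus needs 1.15 * 4000 / 64, roughly 72 MHz, which is then raised to at least 8 MHz (trivially satisfied here), to pixel_rate / 16 for each stream, and to every per-surface requirement computed in the loop above.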
-
-static double CalculateWriteBackDelay(
- enum dml2_source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackVTaps,
- unsigned int WritebackDestinationWidth,
- unsigned int WritebackDestinationHeight,
- unsigned int WritebackSourceHeight,
- unsigned int HTotal)
-{
- double CalculateWriteBackDelay;
- double Line_length;
- double Output_lines_last_notclamped;
- double WritebackVInit;
-
- WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
- Line_length = math_max2((double)WritebackDestinationWidth, math_ceil2((double)WritebackDestinationWidth / 6.0, 1.0) * WritebackVTaps);
- Output_lines_last_notclamped = WritebackDestinationHeight - 1 - math_ceil2(((double)WritebackSourceHeight - (double)WritebackVInit) / (double)WritebackVRatio, 1.0);
- if (Output_lines_last_notclamped < 0) {
- CalculateWriteBackDelay = 0;
- } else {
- CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
- }
- return CalculateWriteBackDelay;
-}
-
-static unsigned int CalculateMaxVStartup(
- bool ptoi_supported,
- unsigned int vblank_nom_default_us,
- const struct dml2_timing_cfg *timing,
- double write_back_delay_us)
-{
- unsigned int vblank_size = 0;
- unsigned int max_vstartup_lines = 0;
-
- double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
- unsigned int vblank_actual = timing->v_total - timing->v_active;
- unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
- unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
- unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
-
- vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
-
- if (timing->interlaced && !ptoi_supported)
- max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
- else
- max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
-#endif
- return max_vstartup_lines;
-}
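
Worked example with an assumed 1080p-like timing (h_total = 2200, pixel clock = 148500 kHz, v_total = 1125, v_active = 1080, vblank_nom = 36, vblank_nom_default_us = 600, no writeback delay): line_time_us is about 14.8, vblank_actual = 45, vblank_nom_default_in_line = 40, vblank_avail = min(36, 40) = 36, vblank_size = min(45, 36) = 36; progressive timing then gives max_vstartup_lines = 36 - max(1, 0) = 35.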
-
-static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *p)
-{
- struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals *l = &scratch->CalculateSwathAndDETConfiguration_locals;
- memset(l, 0, sizeof(struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
- }
-#endif
- CalculateSwathWidth(
- p->display_cfg,
- p->ForceSingleDPP,
- p->NumberOfActiveSurfaces,
- p->ODMMode,
- p->BytePerPixY,
- p->BytePerPixC,
- p->Read256BytesBlockHeightY,
- p->Read256BytesBlockHeightC,
- p->Read256BytesBlockWidthY,
- p->Read256BytesBlockWidthC,
- p->surf_linear128_l,
- p->surf_linear128_c,
- p->DPPPerSurface,
-
- // Output
- p->req_per_swath_ub_l,
- p->req_per_swath_ub_c,
- l->SwathWidthSingleDPP,
- l->SwathWidthSingleDPPChroma,
- p->SwathWidth,
- p->SwathWidthChroma,
- l->MaximumSwathHeightY,
- l->MaximumSwathHeightC,
- p->swath_width_luma_ub,
- p->swath_width_chroma_ub);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * l->MaximumSwathHeightY[k]);
- p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * l->MaximumSwathHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, l->MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, l->MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
-#endif
- if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
- p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
- p->full_swath_bytes_c[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_c[k], 256));
- }
- }
-
- unsigned int TotalActiveDPP = 0;
- bool NoChromaOrLinear = true;
- unsigned int SurfaceDoingUnboundedRequest = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- TotalActiveDPP = TotalActiveDPP + (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]);
- if (p->DPPPerSurface[k] > 0)
- SurfaceDoingUnboundedRequest = k;
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format) || p->display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha
- || p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
- NoChromaOrLinear = false;
- }
- l->SwathTimeValueUs[k] = (unsigned int) ((double)l->MaximumSwathHeightY[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz * 1000);
- }
-
- *p->UnboundedRequestEnabled = UnboundedRequest(p->display_cfg->overrides.hw.force_unbounded_requesting.enable, p->display_cfg->overrides.hw.force_unbounded_requesting.value, TotalActiveDPP, NoChromaOrLinear);
-
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.display_cfg = p->display_cfg;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ForceSingleDPP = p->ForceSingleDPP;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.NumberOfActiveSurfaces = p->NumberOfActiveSurfaces;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.UnboundedRequestEnabled = *p->UnboundedRequestEnabled;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.nomDETInKByte = p->nomDETInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.MaxTotalDETInKByte = p->MaxTotalDETInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ConfigReturnBufferSizeInKByte = p->ConfigReturnBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.MinCompressedBufferSizeInKByte = p->MinCompressedBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ConfigReturnBufferSegmentSizeInkByte = p->ConfigReturnBufferSegmentSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.CompressedBufferSegmentSizeInkByte = p->CompressedBufferSegmentSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ReadBandwidthLuma = p->ReadBandwidthLuma;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ReadBandwidthChroma = p->ReadBandwidthChroma;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.full_swath_bytes_l = p->full_swath_bytes_l;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.full_swath_bytes_c = p->full_swath_bytes_c;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.DPPPerSurface = p->DPPPerSurface;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.DETBufferSizeInKByte = p->DETBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.CompressedBufferSizeInkByte = p->CompressedBufferSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.swath_time_value_us = l->SwathTimeValueUs;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.bestEffortMinActiveLatencyHidingUs = p->display_cfg->overrides.best_effort_min_active_latency_hiding_us;
- if (p->funcs->calculate_det_buffer_size) {
- p->funcs->calculate_det_buffer_size(&scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params);
- } else {
- CalculateDETBufferSize(&scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
-#endif
-
- unsigned int DETBufferSizeInKByteForSwathCalculation;
- *p->ViewportSizeSupport = true;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-
- DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
-#endif
-
- if (p->full_swath_bytes_l[k] + p->full_swath_bytes_c[k] <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k];
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k];
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k];
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k];
- p->request_size_bytes_luma[k] = 256;
- p->request_size_bytes_chroma[k] = 256;
-
- } else if (p->full_swath_bytes_l[k] >= 1.5 * p->full_swath_bytes_c[k] && p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k] / 2;
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k];
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k];
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = 256;
-
- } else if (p->full_swath_bytes_l[k] < 1.5 * p->full_swath_bytes_c[k] && p->full_swath_bytes_l[k] + p->full_swath_bytes_c[k] / 2 <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k];
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k];
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = 256;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
-
- } else {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k] / 2;
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- }
-
- if (p->SwathHeightC[k] == 0)
- p->request_size_bytes_chroma[k] = 0;
-
- if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
- p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
- *p->ViewportSizeSupport = false;
- p->ViewportSizeSupportPerSurface[k] = false;
- } else {
- p->ViewportSizeSupportPerSurface[k] = true;
- }
-
- if (p->SwathHeightC[k] == 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
- p->DETBufferSizeC[k] = 0;
- } else if (l->RoundedUpSwathSizeBytesY[k] <= 1.5 * l->RoundedUpSwathSizeBytesC[k]) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
- p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
- } else {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
- p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, l->RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, l->RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
-#endif
-
- }
-
- const long TTUFIFODEPTH = 8;
- const long MAXIMUMCOMPRESSION = 4;
- *p->compbuf_reserved_space_64b = 2 * p->pixel_chunk_size_kbytes * 1024 / 64;
- if (*p->UnboundedRequestEnabled) {
- *p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
- (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(l->RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / 64)), 1.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, l->RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
-#endif
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
-#endif
-
- *p->hw_debug5 = false;
- if (!p->mrq_present) {
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!(*p->UnboundedRequestEnabled)
- && p->display_cfg->plane_descriptors[k].surface.dcc.enable
- && ((p->rob_buffer_size_kbytes * 1024 + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (l->RoundedUpSwathSizeBytesY[k] + l->RoundedUpSwathSizeBytesC[k])))
- *p->hw_debug5 = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, l->RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
-#endif
- }
- }
-}
-
-static void CalculateODMMode(
- unsigned int MaximumPixelsPerLinePerDSCUnit,
- unsigned int HActive,
- enum dml2_output_encoder_class Output,
- enum dml2_odm_mode ODMUse,
- double MaxDispclk,
- bool DSCEnable,
- unsigned int TotalNumberOfActiveDPP,
- unsigned int MaxNumDPP,
- double PixelClock,
-
- // Output
- bool *TotalAvailablePipesSupport,
- unsigned int *NumberOfDPP,
- enum dml2_odm_mode *ODMMode,
- double *RequiredDISPCLKPerSurface)
-{
- double SurfaceRequiredDISPCLKWithoutODMCombine;
- double SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
- double SurfaceRequiredDISPCLKWithODMCombineThreeToOne;
- double SurfaceRequiredDISPCLKWithODMCombineFourToOne;
-
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
- *TotalAvailablePipesSupport = true;
-
- if (ODMUse == dml2_odm_mode_bypass || ODMUse == dml2_odm_mode_auto)
- *ODMMode = dml2_odm_mode_bypass;
- else if (ODMUse == dml2_odm_mode_combine_2to1)
- *ODMMode = dml2_odm_mode_combine_2to1;
- else if (ODMUse == dml2_odm_mode_combine_3to1)
- *ODMMode = dml2_odm_mode_combine_3to1;
- else if (ODMUse == dml2_odm_mode_combine_4to1)
- *ODMMode = dml2_odm_mode_combine_4to1;
- else if (ODMUse == dml2_odm_mode_split_1to2)
- *ODMMode = dml2_odm_mode_split_1to2;
- else if (ODMUse == dml2_odm_mode_mso_1to2)
- *ODMMode = dml2_odm_mode_mso_1to2;
- else if (ODMUse == dml2_odm_mode_mso_1to4)
- *ODMMode = dml2_odm_mode_mso_1to4;
-
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithoutODMCombine;
- *NumberOfDPP = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
-#endif
-
- if (ODMUse == dml2_odm_mode_combine_4to1 || (ODMUse == dml2_odm_mode_auto &&
- (SurfaceRequiredDISPCLKWithODMCombineThreeToOne > MaxDispclk || (DSCEnable && (HActive > 3 * MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_4to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
- *NumberOfDPP = 4;
- } else {
- *TotalAvailablePipesSupport = false;
- }
- } else if (ODMUse == dml2_odm_mode_combine_3to1 || (ODMUse == dml2_odm_mode_auto &&
- ((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > MaxDispclk && SurfaceRequiredDISPCLKWithODMCombineThreeToOne <= MaxDispclk) ||
- (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 3 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_3to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineThreeToOne;
- *NumberOfDPP = 3;
- } else {
- *TotalAvailablePipesSupport = false;
- }
-
- } else if (ODMUse == dml2_odm_mode_combine_2to1 || (ODMUse == dml2_odm_mode_auto &&
- ((SurfaceRequiredDISPCLKWithoutODMCombine > MaxDispclk && SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= MaxDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_2to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
- *NumberOfDPP = 2;
- } else {
- *TotalAvailablePipesSupport = false;
- }
-
- } else {
- if (TotalNumberOfActiveDPP + 1 <= MaxNumDPP) {
- *NumberOfDPP = 1;
- } else {
- *TotalAvailablePipesSupport = false;
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
-#endif
-
-}
-
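/*
 * Rough reading of the ODM selection above (a sketch, not an authoritative
 * restatement): in dml2_odm_mode_auto the combine factor is increased until
 * either the per-pipe DISPCLK fits under MaxDispclk or the DSC constraint
 * (HActive <= factor * MaximumPixelsPerLinePerDSCUnit) is satisfied. For
 * example (hypothetical case), a timing whose bypass DISPCLK exceeds
 * MaxDispclk but whose 2:1 requirement fits lands in the combine_2to1
 * branch and consumes two DPPs, provided TotalNumberOfActiveDPP + 2 <=
 * MaxNumDPP; otherwise *TotalAvailablePipesSupport is cleared.
 */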
-static void CalculateOutputLink(
- struct dml2_core_internal_scratch *s,
- double PHYCLK,
- double PHYCLKD18,
- double PHYCLKD32,
- double Downspreading,
- bool IsMainSurfaceUsingTheIndicatedTiming,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class OutputFormat,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClockBackEnd,
- double ForcedOutputLinkBPP,
- unsigned int DSCInputBitPerComponent,
- unsigned int NumberOfDSCSlices,
- double AudioSampleRate,
- unsigned int AudioSampleLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
- enum dml2_dsc_enable_option DSCEnable,
- unsigned int OutputLinkDPLanes,
- enum dml2_output_link_dp_rate OutputLinkDPRate,
-
- // Output
- bool *RequiresDSC,
- bool *RequiresFEC,
- double *OutBpp,
- enum dml2_core_internal_output_type *OutputType,
- enum dml2_core_internal_output_type_rate *OutputRate,
- unsigned int *RequiredSlots)
-{
- bool LinkDSCEnable;
- unsigned int dummy;
- *RequiresDSC = false;
- *RequiresFEC = false;
- *OutBpp = 0;
-
- *OutputType = dml2_core_internal_output_type_unknown;
- *OutputRate = dml2_core_internal_output_rate_unknown;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: IsMainSurfaceUsingTheIndicatedTiming = %u\n", __func__, IsMainSurfaceUsingTheIndicatedTiming);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
-#endif
- if (IsMainSurfaceUsingTheIndicatedTiming) {
- if (Output == dml2_hdmi) {
- *RequiresDSC = false;
- *RequiresFEC = false;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, math_min2(600, PHYCLK) * 10, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, false, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = "HDMI";
- *OutputType = dml2_core_internal_output_type_hdmi;
- } else if (Output == dml2_dp || Output == dml2_dp2p0 || Output == dml2_edp) {
- if (DSCEnable == dml2_dsc_enable) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp || Output == dml2_dp2p0) {
- *RequiresFEC = true;
- } else {
- *RequiresFEC = false;
- }
- } else {
- *RequiresDSC = false;
- LinkDSCEnable = false;
- if (Output == dml2_dp2p0) {
- *RequiresFEC = true;
- } else {
- *RequiresFEC = false;
- }
- }
- if (Output == dml2_dp2p0) {
- *OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000.0 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32 < 13500.0 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR10";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr10;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500.0 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && PHYCLKD32 < 20000 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR13p5";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr13p5;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr20) && *OutBpp == 0 && PHYCLKD32 >= 20000 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 20000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 20000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR20";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr20;
- }
- } else { // output is dp or edp
- *OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr) && PHYCLK >= 270) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 2700, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLK < 540 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 2700, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr2) && *OutBpp == 0 && PHYCLK >= 540) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 5400, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && PHYCLK < 810 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 5400, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR2";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr2;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr3) && *OutBpp == 0 && PHYCLK >= 810) { // VBA_ERROR, vba code doesn't have hbr3 check
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 8100, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 8100, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR3";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr3;
- }
- }
- } else if (Output == dml2_hdmifrl) {
- if (DSCEnable == dml2_dsc_enable) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- } else {
- *RequiresDSC = false;
- LinkDSCEnable = false;
- *RequiresFEC = false;
- }
- *OutBpp = 0;
- if (PHYCLKD18 >= 3000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 3000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "3x3";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_3x3;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "6x3";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x3;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "6x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 8000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 8000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "8x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_8x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 10000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000.0 / 18) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- }
- //OutputTypeAndRate = Output & "10x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_10x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 12000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- }
- //OutputTypeAndRate = Output & "12x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_12x4;
- }
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
-#endif
-}
-
-static double CalculateWriteBackDISPCLK(
- enum dml2_source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackHTaps,
- unsigned int WritebackVTaps,
- unsigned int WritebackSourceWidth,
- unsigned int WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackLineBufferSize)
-{
- double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
-
- DISPCLK_H = PixelClock * math_ceil2((double)WritebackHTaps / 8.0, 1) / WritebackHRatio;
- DISPCLK_V = PixelClock * (WritebackVTaps * math_ceil2((double)WritebackDestinationWidth / 6.0, 1) + 8.0) / (double)HTotal;
- DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / (double)WritebackSourceWidth;
- return math_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
-}
-
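/*
 * Worked example for the writeback DISPCLK terms above (hypothetical
 * numbers, not taken from the source): PixelClock = 600 MHz,
 * WritebackHTaps = 4, WritebackHRatio = 1, WritebackVTaps = 4,
 * WritebackDestinationWidth = 1920, HTotal = 2200:
 *   DISPCLK_H = 600 * ceil(4 / 8.0) / 1                 = 600 MHz
 *   DISPCLK_V = 600 * (4 * ceil(1920 / 6.0) + 8) / 2200 ~= 351.3 MHz
 * DISPCLK_HB is evaluated the same way from the line-buffer term, and the
 * function returns the maximum of the three.
 */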
-static double RequiredDTBCLK(
- bool DSCEnable,
- double PixelClock,
- enum dml2_output_format_class OutputFormat,
- double OutputBpp,
- unsigned int DSCSlices,
- unsigned int HTotal,
- unsigned int HActive,
- unsigned int AudioRate,
- unsigned int AudioLayout)
-{
- if (DSCEnable != true) {
- return math_max2(PixelClock / 4.0 * OutputBpp / 24.0, 25.0);
- } else {
- double PixelWordRate = PixelClock / (OutputFormat == dml2_444 ? 1 : 2);
- double HCActive = math_ceil2(DSCSlices * math_ceil2(OutputBpp * math_ceil2(HActive / DSCSlices, 1) / 8.0, 1) / 3.0, 1);
- double HCBlank = 64 + 32 * math_ceil2(AudioRate * (AudioLayout == 1 ? 1 : 0.25) * HTotal / (PixelClock * 1000), 1);
- double AverageTribyteRate = PixelWordRate * (HCActive + HCBlank) / HTotal;
- double HActiveTribyteRate = PixelWordRate * HCActive / HActive;
- return math_max4(PixelWordRate / 4.0, AverageTribyteRate / 4.0, HActiveTribyteRate / 4.0, 25.0) * 1.002;
- }
-}
-
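/*
 * Worked example for the non-DSC branch of RequiredDTBCLK (hypothetical
 * numbers): PixelClock = 594 MHz, OutputBpp = 24:
 *   max(594 / 4.0 * 24 / 24.0, 25.0) = max(148.5, 25.0) = 148.5 MHz
 * The 25 MHz floor only matters for very low pixel clocks or bpp values.
 */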
-static unsigned int DSCDelayRequirement(
- bool DSCEnabled,
- enum dml2_odm_mode ODMMode,
- unsigned int DSCInputBitPerComponent,
- double OutputBpp,
- unsigned int HActive,
- unsigned int HTotal,
- unsigned int NumberOfDSCSlices,
- enum dml2_output_format_class OutputFormat,
- enum dml2_output_encoder_class Output,
- double PixelClock,
- double PixelClockBackEnd)
-{
- unsigned int DSCDelayRequirement_val = 0;
- unsigned int NumberOfDSCSlicesFactor = 1;
-
- if (DSCEnabled == true && OutputBpp != 0) {
-
- if (ODMMode == dml2_odm_mode_combine_4to1)
- NumberOfDSCSlicesFactor = 4;
- else if (ODMMode == dml2_odm_mode_combine_3to1)
- NumberOfDSCSlicesFactor = 3;
- else if (ODMMode == dml2_odm_mode_combine_2to1)
- NumberOfDSCSlicesFactor = 2;
-
- DSCDelayRequirement_val = NumberOfDSCSlicesFactor * (dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (unsigned int)(math_ceil2((double)HActive / (double)NumberOfDSCSlices, 1.0)),
- (NumberOfDSCSlices / NumberOfDSCSlicesFactor), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output));
-
- DSCDelayRequirement_val = (unsigned int)(DSCDelayRequirement_val + (HTotal - HActive) * math_ceil2((double)DSCDelayRequirement_val / (double)HActive, 1.0));
- DSCDelayRequirement_val = (unsigned int)(DSCDelayRequirement_val * PixelClock / PixelClockBackEnd);
-
- } else {
- DSCDelayRequirement_val = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
-#endif
-
- return DSCDelayRequirement_val;
-}
-
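/*
 * Rough reading of DSCDelayRequirement above (a sketch, not authoritative):
 * the per-slice encoder delay from dscceComputeDelay()/dscComputeDelay() is
 * multiplied by the ODM combine factor, then extended by
 * (HTotal - HActive) * ceil(delay / HActive) to account for the horizontal
 * blanking crossed by the delayed pixels, and finally rescaled from the
 * back-end to the front-end pixel clock.
 */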
-static void CalculateSurfaceSizeInMall(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int BytesPerPixelY[],
- unsigned int BytesPerPixelC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int ReadBlockWidthY[],
- unsigned int ReadBlockWidthC[],
- unsigned int ReadBlockHeightY[],
- unsigned int ReadBlockHeightC[],
-
- // Output
- unsigned int SurfaceSizeInMALL[],
- bool *ExceededMALLSize)
-{
- unsigned int TotalSurfaceSizeInMALLForSS = 0;
- unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
- unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- const struct dml2_composition_cfg *composition = &display_cfg->plane_descriptors[k].composition;
- const struct dml2_surface_cfg *surface = &display_cfg->plane_descriptors[k].surface;
-
- if (composition->viewport.stationary) {
- SurfaceSizeInMALL[k] = (unsigned int)(math_min2(math_ceil2((double)surface->plane0.width, ReadBlockWidthY[k]),
- math_floor2(composition->viewport.plane0.x_start + composition->viewport.plane0.width + ReadBlockWidthY[k] - 1, ReadBlockWidthY[k]) -
- math_floor2((double)composition->viewport.plane0.x_start, ReadBlockWidthY[k])) *
- math_min2(math_ceil2((double)surface->plane0.height, ReadBlockHeightY[k]),
- math_floor2((double)composition->viewport.plane0.y_start + composition->viewport.plane0.height + ReadBlockHeightY[k] - 1, ReadBlockHeightY[k]) -
- math_floor2((double)composition->viewport.plane0.y_start, ReadBlockHeightY[k])) * BytesPerPixelY[k]);
-
- if (ReadBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2((double)surface->plane1.width, ReadBlockWidthC[k]),
- math_floor2((double)composition->viewport.plane1.y_start + composition->viewport.plane1.width + ReadBlockWidthC[k] - 1, ReadBlockWidthC[k]) -
- math_floor2((double)composition->viewport.plane1.y_start, ReadBlockWidthC[k])) *
- math_min2(math_ceil2((double)surface->plane1.height, ReadBlockHeightC[k]),
- math_floor2((double)composition->viewport.plane1.y_start + composition->viewport.plane1.height + ReadBlockHeightC[k] - 1, ReadBlockHeightC[k]) -
- math_floor2(composition->viewport.plane1.y_start, ReadBlockHeightC[k])) * BytesPerPixelC[k]);
- }
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane0.width, 8 * Read256BytesBlockWidthY[k]),
- math_floor2(composition->viewport.plane0.x_start + composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k]) -
- math_floor2(composition->viewport.plane0.x_start, 8 * Read256BytesBlockWidthY[k])) *
- math_min2(math_ceil2(surface->plane0.height, 8 * Read256BytesBlockHeightY[k]),
- math_floor2(composition->viewport.plane0.y_start + composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1, 8 * Read256BytesBlockHeightY[k]) -
- math_floor2(composition->viewport.plane0.y_start, 8 * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane1.width, 8 * Read256BytesBlockWidthC[k]),
- math_floor2(composition->viewport.plane1.y_start + composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1, 8 * Read256BytesBlockWidthC[k]) -
- math_floor2(composition->viewport.plane1.y_start, 8 * Read256BytesBlockWidthC[k])) *
- math_min2(math_ceil2(surface->plane1.height, 8 * Read256BytesBlockHeightC[k]),
- math_floor2(composition->viewport.plane1.y_start + composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1, 8 * Read256BytesBlockHeightC[k]) -
- math_floor2(composition->viewport.plane1.y_start, 8 * Read256BytesBlockHeightC[k])) * BytesPerPixelC[k] / 256);
- }
- }
- } else {
- SurfaceSizeInMALL[k] = (unsigned int)(math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + ReadBlockWidthY[k] - 1), ReadBlockWidthY[k]) *
- math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + ReadBlockHeightY[k] - 1), ReadBlockHeightY[k]) * BytesPerPixelY[k]);
- if (ReadBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + ReadBlockWidthC[k] - 1), ReadBlockWidthC[k]) *
- math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + ReadBlockHeightC[k] - 1), ReadBlockHeightC[k]) * BytesPerPixelC[k]);
- }
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1), 8 * Read256BytesBlockWidthY[k]) *
- math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1), 8 * Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
-
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1), 8 * Read256BytesBlockWidthC[k]) *
- math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1), 8 * Read256BytesBlockHeightC[k]) * BytesPerPixelC[k] / 256);
- }
- }
- }
- }
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-		/* SS and SubVP are counted separately as they are never used at the same time */
- if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))
- TotalSurfaceSizeInMALLForSubVP += SurfaceSizeInMALL[k];
- else if (display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable)
- TotalSurfaceSizeInMALLForSS += SurfaceSizeInMALL[k];
- }
-
- *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) ||
- (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
-
-#ifdef __DML_VBA_DEBUG__
-	dml2_printf("DML::%s: MALLAllocatedForDCNInBytes = %u\n", __func__, MALLAllocatedForDCNInBytes);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
-#endif
-}
-
-static void calculate_tdlut_setting(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_tdlut_setting_params *p)
-{
- if (!p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = 0;
- *p->tdlut_opt_time = 0;
- *p->tdlut_drain_time = 0;
- *p->tdlut_bytes_per_group = 0;
- *p->tdlut_pte_bytes_per_frame = 0;
- *p->tdlut_bytes_per_frame = 0;
- return;
- }
-
- // locals
- unsigned int tdlut_bpe = 8;
- unsigned int tdlut_width;
- unsigned int tdlut_pitch_bytes;
- unsigned int tdlut_footprint_bytes;
- unsigned int vmpg_bytes;
- unsigned int tdlut_vmpg_per_frame;
- unsigned int tdlut_pte_req_per_frame;
- unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
- double tdlut_drain_rate;
- unsigned int tdlut_mpc_width;
- unsigned int tdlut_bytes_per_group_simple;
-
- if (p->tdlut_mpc_width_flag) {
- tdlut_mpc_width = 33;
- tdlut_bytes_per_group_simple = 39 * 256;
- } else {
- tdlut_mpc_width = 17;
- tdlut_bytes_per_group_simple = 10 * 256;
- }
-
- vmpg_bytes = p->gpuvm_page_size_kbytes * 1024;
-
- if (p->tdlut_addressing_mode == dml2_tdlut_simple_linear) {
- if (p->tdlut_width_mode == dml2_tdlut_width_17_cube)
- tdlut_width = 4916;
- else
- tdlut_width = 35940;
- } else {
- if (p->tdlut_width_mode == dml2_tdlut_width_17_cube)
- tdlut_width = 17;
- else // dml2_tdlut_width_33_cube
- tdlut_width = 33;
- }
-
- if (p->is_gfx11)
- tdlut_pitch_bytes = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256); //256B alignment
- else
- tdlut_pitch_bytes = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 128); //128B alignment
-
- if (p->tdlut_addressing_mode == dml2_tdlut_sw_linear)
- tdlut_footprint_bytes = tdlut_pitch_bytes * tdlut_width * tdlut_width;
- else
- tdlut_footprint_bytes = tdlut_pitch_bytes;
-
- if (!p->gpuvm_enable) {
- tdlut_vmpg_per_frame = 0;
- tdlut_pte_req_per_frame = 0;
- } else {
- tdlut_vmpg_per_frame = (unsigned int)math_ceil2(tdlut_footprint_bytes - 1, vmpg_bytes) / vmpg_bytes + 1;
- tdlut_pte_req_per_frame = (unsigned int)math_ceil2(tdlut_vmpg_per_frame - 1, 8) / 8 + 1;
- }
- tdlut_bytes_per_line = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 64); //64b request
- *p->tdlut_pte_bytes_per_frame = tdlut_pte_req_per_frame * 64;
-
- if (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) {
- //the tdlut_width is either 17 or 33 but the 33x33x33 is subsampled every other line/slice
- *p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
- *p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
-		//the delivery cycle count is DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
- } else {
- //tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
- *p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
- *p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width / 2.0, 1);
- tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
- }
-
- //the tdlut is fetched during the 2 row times of prefetch.
- if (p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2(*p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
- *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %u\n", __func__, p->tdlut_addressing_mode);
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
-#endif
-}
-
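/*
 * Worked example for the sw_linear 17-cube case above (hypothetical,
 * assumes tdlut_mpc_width_flag == false so tdlut_mpc_width == 17 and
 * tdlut_bpe == 8):
 *   tdlut_bytes_per_line     = ceil(17 * 8, 64)    = 192 bytes
 *   tdlut_bytes_per_frame    = 192 * 17 * 17       = 55488 bytes
 *   tdlut_bytes_per_group    = 192 * 17            = 3264 bytes
 *   tdlut_groups_per_2row_ub = ceil(55488 / 3264)  = 17
 */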
-static void CalculateTarb(
- const struct dml2_display_cfg *display_cfg,
- unsigned int PixelChunkSizeInKByte,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- double ReturnBW,
- unsigned int MetaChunkSize,
-
- // output
- double *Tarb,
- double *Tarb_prefetch)
-{
- double extra_bytes = 0;
- double extra_bytes_prefetch = 0;
- double HostVMDynamicLevels = CalculateHostVMDynamicLevels(display_cfg->gpuvm_enable, display_cfg->hostvm_enable, HostVMMinPageSize, display_cfg->hostvm_max_non_cached_page_table_levels);
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- extra_bytes = extra_bytes + (NumberOfDPP[k] * PixelChunkSizeInKByte * 1024);
-
- if (display_cfg->plane_descriptors[k].surface.dcc.enable)
- extra_bytes = extra_bytes + (MetaChunkSize * 1024);
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- extra_bytes = extra_bytes + tdlut_bytes_per_group[k];
- }
-
- extra_bytes_prefetch = extra_bytes;
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (display_cfg->gpuvm_enable == true) {
- extra_bytes = extra_bytes + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
- extra_bytes_prefetch = extra_bytes_prefetch + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactorPrefetch;
- }
- }
- *Tarb = extra_bytes / ReturnBW;
- *Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
-#endif
-}
-
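/*
 * Sketch of the arbitration-time formula above (hypothetical numbers,
 * treating ReturnBW as bytes per microsecond): one surface on 2 DPPs with
 * PixelChunkSizeInKByte = 8, DCC off, no 3DLUT and GPUVM off gives
 * extra_bytes = 2 * 8 * 1024 = 16384 bytes, so at ReturnBW = 1000
 * Tarb = 16384 / 1000 ~= 16.4 us.
 */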
-static double CalculateTWait(
- long reserved_vblank_time_ns,
- double UrgentLatency,
- double Ttrip)
-{
- double TWait;
- double t_urg_trip = math_max2(UrgentLatency, Ttrip);
- TWait = reserved_vblank_time_ns / 1000.0 + t_urg_trip;
-
-#ifdef __DML_VBA_DEBUG__
-	dml2_printf("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
-#endif
- return TWait;
-}
-
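/*
 * Worked example for CalculateTWait above (hypothetical numbers):
 * reserved_vblank_time_ns = 600000, UrgentLatency = 4 us, Ttrip = 6 us:
 *   TWait = 600000 / 1000.0 + max(4, 6) = 600 + 6 = 606 us
 */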
-
-static void CalculateVUpdateAndDynamicMetadataParameters(
- unsigned int MaxInterDCNTileRepeaters,
- double Dppclk,
- double Dispclk,
- double DCFClkDeepSleep,
- double PixelClock,
- unsigned int HTotal,
- unsigned int VBlank,
- unsigned int DynamicMetadataTransmittedBytes,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
-
- // Output
- double *TSetup,
- double *Tdmbf,
- double *Tdmec,
- double *Tdmsks,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix)
-{
- double TotalRepeaterDelayTime;
- TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / Dppclk + 3 / Dispclk);
- *VUpdateWidthPix = (unsigned int)(math_ceil2((14.0 / DCFClkDeepSleep + 12.0 / Dppclk + TotalRepeaterDelayTime) * PixelClock, 1.0));
- *VReadyOffsetPix = (unsigned int)(math_ceil2(math_max2(150.0 / Dppclk, TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / Dppclk) * PixelClock, 1.0));
- *VUpdateOffsetPix = (unsigned int)(math_ceil2(HTotal / 4.0, 1.0));
- *TSetup = (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
- *Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / Dispclk;
- *Tdmec = HTotal / PixelClock;
-
- if (DynamicMetadataLinesBeforeActiveRequired == 0) {
- *Tdmsks = VBlank * HTotal / PixelClock / 2.0;
- } else {
- *Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
- }
- if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
- *Tdmsks = *Tdmsks / 2;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
-
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
-
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
-#endif
-}
-
-static double get_urgent_bandwidth_required(
- struct dml2_core_shared_get_urgent_bandwidth_required_locals *l,
- const struct dml2_display_cfg *display_cfg,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool inc_flip_bw, // including flip bw
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[])
-{
- memset(l, 0, sizeof(struct dml2_core_shared_get_urgent_bandwidth_required_locals));
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- l->mall_svp_prefetch_factor = (state_type == dml2_core_internal_soc_state_svp_prefetch) ? (bw_type == dml2_core_internal_bw_dram ? mall_prefetch_dram_overhead_factor[k] : mall_prefetch_sdp_overhead_factor[k]) : 1.0;
- l->tmp_nom_adj_factor_p0 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_nom_overhead_factor_p0[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_nom_adj_factor_p1 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_nom_overhead_factor_p1[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_pref_adj_factor_p0 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_pref_overhead_factor_p0[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_pref_adj_factor_p1 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_pref_overhead_factor_p1[k] : 1.0) * l->mall_svp_prefetch_factor;
-
- l->adj_factor_p0 = UrgentBurstFactorLuma[k] * l->tmp_nom_adj_factor_p0;
- l->adj_factor_p1 = UrgentBurstFactorChroma[k] * l->tmp_nom_adj_factor_p1;
- l->adj_factor_cur = UrgentBurstFactorCursor[k];
- l->adj_factor_p0_pre = UrgentBurstFactorLumaPre[k] * l->tmp_pref_adj_factor_p0;
- l->adj_factor_p1_pre = UrgentBurstFactorChromaPre[k] * l->tmp_pref_adj_factor_p1;
- l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
-
-		// both dchub_urgent_bw_at_sdp_noflip and dchub_urgent_bw_at_dram_noflip don't include the phantom_pipe because iflips don't occur while the phantom_pipe is active
- bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
-
- // Exclude phantom pipe in bw calculation for non svp prefetch state
- if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
-
- if (display_cfg->plane_descriptors[k].immediate_flip == false || !inc_flip_bw)
- l->per_plane_flip_bw[k] = NumberOfDPP[k] * (dpte_row_bw[k] + meta_row_bw[k]);
- else
- l->per_plane_flip_bw[k] = NumberOfDPP[k] * flip_bw[k];
-
-
- if (!exclude_this_plane) {
- l->required_bandwidth_mbps_this_surface = math_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur,
- l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre);
-
- l->required_bandwidth_mbps = l->required_bandwidth_mbps + l->required_bandwidth_mbps_this_surface;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
-
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
-
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
-
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
-#endif
- }
-
- return l->required_bandwidth_mbps;
-}
-
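/*
 * Reading of the per-surface term above (a sketch, not authoritative): each
 * non-excluded plane contributes the maximum of (a) its vm/row prefetch
 * bandwidth, (b) its flip bandwidth plus active luma/chroma/cursor read
 * bandwidth scaled by the nominal burst and DCC adjustment factors, and
 * (c) its flip bandwidth plus luma/chroma/cursor prefetch bandwidth scaled
 * by the prefetch adjustment factors; the function returns the sum of those
 * per-plane maxima.
 */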
-static void CalculateExtraLatency(
- const struct dml2_display_cfg *display_cfg,
- unsigned int ROBBufferSizeInKByte,
- unsigned int RoundTripPingLatencyCycles,
- unsigned int ReorderingBytes,
- double DCFCLK,
- double FabricClock,
- unsigned int PixelChunkSizeInKByte,
- double ReturnBW,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
- unsigned int max_outstanding_requests,
- unsigned int request_size_bytes_luma[],
- unsigned int request_size_bytes_chroma[],
- unsigned int MetaChunkSize,
- unsigned int dchub_arb_to_ret_delay,
- double Ttrip,
- unsigned int hostvm_mode,
-
- // output
- double *ExtraLatency,
- double *ExtraLatency_sr,
- double *ExtraLatencyPrefetch)
-{
- double Tarb;
- double Tarb_prefetch;
-
- CalculateTarb(
- display_cfg,
- PixelChunkSizeInKByte,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dpte_group_bytes,
- tdlut_bytes_per_group,
- HostVMInefficiencyFactor,
- HostVMInefficiencyFactorPrefetch,
- HostVMMinPageSize,
- ReturnBW,
- MetaChunkSize,
- // output
- &Tarb,
- &Tarb_prefetch);
-
- unsigned int max_request_size_bytes = 0;
- double Tex_trips = (display_cfg->hostvm_enable && hostvm_mode == 1) ? (2.0 * Ttrip) : 0.0;
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (request_size_bytes_luma[k] > max_request_size_bytes)
- max_request_size_bytes = request_size_bytes_luma[k];
- if (request_size_bytes_chroma[k] > max_request_size_bytes)
- max_request_size_bytes = request_size_bytes_chroma[k];
- }
-
- if (qos_type == dml2_qos_param_type_dcn4x) {
- *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
- *ExtraLatency = *ExtraLatency_sr;
- if (max_oustanding_when_urgent_expected)
- *ExtraLatency = *ExtraLatency + (ROBBufferSizeInKByte * 1024 - max_outstanding_requests * max_request_size_bytes) / ReturnBW;
- } else {
- *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK + RoundTripPingLatencyCycles / FabricClock + ReorderingBytes / ReturnBW;
- *ExtraLatency = *ExtraLatency_sr;
- }
- *ExtraLatency = *ExtraLatency + Tex_trips;
- *ExtraLatencyPrefetch = *ExtraLatency + Tarb_prefetch;
- *ExtraLatency = *ExtraLatency + Tarb;
- *ExtraLatency_sr = *ExtraLatency_sr + Tarb;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
-	dml2_printf("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
-#endif
-}
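
/*
 * Editor's note: worked example of the dcn4x extra-latency terms above,
 * with invented numbers (not taken from any real SoC table). Times come
 * out in microseconds because bandwidth is in MB/s and sizes in bytes,
 * as in the function above.
 */
static double extra_latency_dcn4x_example(void)
{
	double dchub_arb_to_ret_delay = 102.0; /* DCFCLK cycles */
	double dcfclk_mhz = 1000.0;
	double rob_size_bytes = 128.0 * 1024.0;
	double max_outstanding_requests = 512.0;
	double max_request_size_bytes = 128.0;
	double return_bw_mbps = 64000.0;

	/* dchub_arb_to_ret_delay / DCFCLK: 102 / 1000 = 0.102 us */
	double latency_us = dchub_arb_to_ret_delay / dcfclk_mhz;

	/*
	 * ROB slack term, added only when max outstanding requests are
	 * expected while urgent: (131072 - 512 * 128) / 64000 ~= 1.024 us.
	 */
	latency_us += (rob_size_bytes -
		       max_outstanding_requests * max_request_size_bytes) /
		      return_bw_mbps;

	/* Tex_trips, Tarb and Tarb_prefetch are added on top of this
	 * base term in the function above. */
	return latency_us;
}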
-
-static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculatePrefetchSchedule_params *p)
-{
- struct dml2_core_calcs_CalculatePrefetchSchedule_locals *s = &scratch->CalculatePrefetchSchedule_locals;
-
- s->NoTimeToPrefetch = false;
- s->DPPCycles = 0;
- s->DISPCLKCycles = 0;
- s->DSTTotalPixelsAfterScaler = 0.0;
- s->LineTime = 0.0;
- s->dst_y_prefetch_equ = 0.0;
- s->prefetch_bw_oto = 0.0;
- s->Tvm_oto = 0.0;
- s->Tr0_oto = 0.0;
- s->Tvm_oto_lines = 0.0;
- s->Tr0_oto_lines = 0.0;
- s->dst_y_prefetch_oto = 0.0;
- s->TimeForFetchingVM = 0.0;
- s->TimeForFetchingRowInVBlank = 0.0;
- s->LinesToRequestPrefetchPixelData = 0.0;
- s->HostVMDynamicLevelsTrips = 0;
- s->trip_to_mem = 0.0;
- *p->Tvm_trips = 0.0;
- *p->Tr0_trips = 0.0;
- s->Tvm_trips_rounded = 0.0;
- s->Tr0_trips_rounded = 0.0;
- s->max_Tsw = 0.0;
- s->Lsw_oto = 0.0;
- s->Tpre_rounded = 0.0;
- s->prefetch_bw_equ = 0.0;
- s->Tvm_equ = 0.0;
- s->Tr0_equ = 0.0;
- s->Tdmbf = 0.0;
- s->Tdmec = 0.0;
- s->Tdmsks = 0.0;
- s->prefetch_sw_bytes = 0.0;
- s->prefetch_bw_pr = 0.0;
- s->bytes_pp = 0.0;
- s->dep_bytes = 0.0;
- s->min_Lsw_oto = 0.0;
- s->Tsw_est1 = 0.0;
- s->Tsw_est3 = 0.0;
- s->cursor_prefetch_bytes = 0;
- *p->prefetch_cursor_bw = 0;
- bool dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
-
- s->TWait_p = p->TWait - p->Ttrip; // TWait includes max(Turg, Ttrip)
-
- if (p->display_cfg->gpuvm_enable == true && p->display_cfg->hostvm_enable == true) {
- s->HostVMDynamicLevelsTrips = p->display_cfg->hostvm_max_non_cached_page_table_levels;
- } else {
- s->HostVMDynamicLevelsTrips = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: MaxVStartup = %u\n", __func__, p->MaxVStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
-#endif
- CalculateVUpdateAndDynamicMetadataParameters(
- p->MaxInterDCNTileRepeaters,
- p->myPipe->Dppclk,
- p->myPipe->Dispclk,
- p->myPipe->DCFClkDeepSleep,
- p->myPipe->PixelClock,
- p->myPipe->HTotal,
- p->myPipe->VBlank,
- p->DynamicMetadataTransmittedBytes,
- p->DynamicMetadataLinesBeforeActiveRequired,
- p->myPipe->InterlaceEnable,
- p->myPipe->ProgressiveToInterlaceUnitInOPP,
- p->TSetup,
-
- // Output
- &s->Tdmbf,
- &s->Tdmec,
- &s->Tdmsks,
- p->VUpdateOffsetPix,
- p->VUpdateWidthPix,
- p->VReadyOffsetPix);
-
- s->LineTime = p->myPipe->HTotal / p->myPipe->PixelClock;
- s->trip_to_mem = p->Ttrip;
- *p->Tvm_trips = p->ExtraLatencyPrefetch + s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1));
- if (dcc_mrq_enable)
- *p->Tvm_trips_flip = *p->Tvm_trips;
- else
- *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
- *p->Tr0_trips_flip = s->trip_to_mem * (s->HostVMDynamicLevelsTrips + 1);
- *p->Tr0_trips = math_max2(*p->Tr0_trips_flip, p->tdlut_opt_time / 2);
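/*
 * Editor's note (illustrative example, invented configuration): with
 * gpuvm_max_page_table_levels = 4 and HostVM disabled
 * (HostVMDynamicLevelsTrips = 0), the formulas above reduce to
 * Tvm_trips = ExtraLatencyPrefetch + 4 * trip_to_mem,
 * Tr0_trips_flip = trip_to_mem, and
 * Tr0_trips = max(trip_to_mem, tdlut_opt_time / 2).
 */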
-
- if (p->DynamicMetadataVMEnabled == true) {
- *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
- *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
- } else {
- *p->Tdmdl_vm = 0;
- *p->Tdmdl = p->TWait + p->ExtraLatencyPrefetch; // Tex
- }
-
- if (p->DynamicMetadataEnable == true) {
- if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
- *p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- } else {
- *p->NotEnoughTimeForDynamicMetadata = false;
- }
- } else {
- *p->NotEnoughTimeForDynamicMetadata = false;
- }
-
- if (p->myPipe->ScalerEnabled)
- s->DPPCycles = (unsigned int)(p->DPPCLKDelaySubtotalPlusCNVCFormater + p->DPPCLKDelaySCL);
- else
- s->DPPCycles = (unsigned int)(p->DPPCLKDelaySubtotalPlusCNVCFormater + p->DPPCLKDelaySCLLBOnly);
-
- s->DPPCycles = (unsigned int)(s->DPPCycles + p->myPipe->NumberOfCursors * p->DPPCLKDelayCNVCCursor);
-
- s->DISPCLKCycles = (unsigned int)p->DISPCLKDelaySubtotal;
-
- if (p->myPipe->Dppclk == 0.0 || p->myPipe->Dispclk == 0.0)
- return true;
-
- *p->DSTXAfterScaler = (unsigned int)math_round(s->DPPCycles * p->myPipe->PixelClock / p->myPipe->Dppclk + s->DISPCLKCycles * p->myPipe->PixelClock / p->myPipe->Dispclk + p->DSCDelay);
- *p->DSTXAfterScaler = (unsigned int)math_round(*p->DSTXAfterScaler + (p->myPipe->ODMMode != dml2_odm_mode_bypass ? 18 : 0) + (p->myPipe->DPPPerSurface - 1) * p->DPP_RECOUT_WIDTH +
- ((p->myPipe->ODMMode == dml2_odm_mode_split_1to2 || p->myPipe->ODMMode == dml2_odm_mode_mso_1to2) ? (double)p->myPipe->HActive / 2.0 : 0) +
- ((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
-
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
-#endif
-
- if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
- *p->DSTYAfterScaler = 1;
- else
- *p->DSTYAfterScaler = 0;
-
- s->DSTTotalPixelsAfterScaler = *p->DSTYAfterScaler * p->myPipe->HTotal + *p->DSTXAfterScaler;
- *p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
- *p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
-#endif
-
- s->NoTimeToPrefetch = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
-#endif
- if (p->display_cfg->gpuvm_enable) {
- s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
- *p->Tvm_trips_flip_rounded = math_ceil2(4.0 * *p->Tvm_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
- } else {
- s->Tvm_trips_rounded = s->LineTime / 4.0;
- *p->Tvm_trips_flip_rounded = s->LineTime / 4.0;
- }
- s->Tvm_trips_rounded = math_max2(s->Tvm_trips_rounded, s->LineTime / 4.0);
- *p->Tvm_trips_flip_rounded = math_max2(*p->Tvm_trips_flip_rounded, s->LineTime / 4.0);
-
- if (p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable) {
- s->Tr0_trips_rounded = math_ceil2(4.0 * *p->Tr0_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
- *p->Tr0_trips_flip_rounded = math_ceil2(4.0 * *p->Tr0_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
- } else {
- s->Tr0_trips_rounded = s->LineTime / 4.0;
- *p->Tr0_trips_flip_rounded = s->LineTime / 4.0;
- }
- s->Tr0_trips_rounded = math_max2(s->Tr0_trips_rounded, s->LineTime / 4.0);
- *p->Tr0_trips_flip_rounded = math_max2(*p->Tr0_trips_flip_rounded, s->LineTime / 4.0);
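/*
 * Editor's note: the math_ceil2(4.0 * T / LineTime, 1.0) / 4.0 * LineTime
 * pattern above rounds a time up to the next multiple of a quarter line,
 * and the math_max2 calls clamp it to at least one quarter line.
 * Example (invented numbers): T = 2.3 us with LineTime = 8 us gives
 * 4 * 2.3 / 8 = 1.15, ceil -> 2, so the rounded value is
 * 2 / 4 * 8 = 4.0 us, i.e. two quarter-lines.
 */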
-
- *p->Tno_bw_flip = 0;
- if (p->display_cfg->gpuvm_enable == true) {
- if (p->display_cfg->gpuvm_max_page_table_levels >= 3) {
- *p->Tno_bw = p->ExtraLatencyPrefetch + s->trip_to_mem * (double)((p->display_cfg->gpuvm_max_page_table_levels - 2) * (s->HostVMDynamicLevelsTrips + 1));
- } else if (p->display_cfg->gpuvm_max_page_table_levels == 1 && !dcc_mrq_enable && !p->setup_for_tdlut) {
- *p->Tno_bw = p->ExtraLatencyPrefetch;
- } else {
- *p->Tno_bw = 0;
- }
- *p->Tno_bw_flip = *p->Tno_bw;
- } else {
- *p->Tno_bw = 0;
- }
-
- if (dml2_core_shared_is_420(p->myPipe->SourcePixelFormat)) {
- s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC / 4.0;
- } else {
- s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC;
- }
-
- s->prefetch_bw_pr = s->bytes_pp * p->myPipe->PixelClock / (double)p->myPipe->DPPPerSurface;
- if (p->myPipe->VRatio < 1.0)
- s->prefetch_bw_pr = p->myPipe->VRatio * s->prefetch_bw_pr;
- s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
-
- s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
- s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
-
- s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
-
- unsigned int vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
- unsigned int extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
-
- if (p->setup_for_tdlut)
- vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
-
- unsigned long tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
- s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
- p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
- s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
-
- if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_oto = math_max3(
- *p->Tvm_trips,
- *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
- s->LineTime / 4.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4);
-#endif
-
- } else
- s->Tvm_oto = s->LineTime / 4.0;
-
- if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_oto = math_max3(
- *p->Tr0_trips,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
- s->LineTime / 4.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
-#endif
- } else
- s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0;
-
- s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
- s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
- s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
-
- //To (time for delay after scaler) in line time
- unsigned int Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
-
- //Tpre_equ in line time
- s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(s->TWait_p + p->TCalc, *p->Tdmdl - p->Ttrip)) / s->LineTime - Lo;
- s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
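/*
 * Editor's note: DST_Y_PREFETCH is a U6.2 fixed-point register field,
 * i.e. 6 integer bits and 2 fractional bits, so the largest representable
 * value is 63.75 lines in steps of 0.25 line, hence the clamp above and
 * the quarter-line quantization applied further down.
 */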
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, s->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %lu\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
-
- s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
-
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
-
- s->dep_bytes = math_max2(vm_bytes * p->HostVMInefficiencyFactor, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- dml2_printf("DML::%s: dep_bytes: %f\n", __func__, s->dep_bytes);
- dml2_printf("DML::%s: prefetch_sw_bytes: %f\n", __func__, s->prefetch_sw_bytes);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes * p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- if (s->prefetch_sw_bytes < s->dep_bytes) {
- s->prefetch_sw_bytes = 2 * s->dep_bytes;
- dml2_printf("DML::%s: bump prefetch_sw_bytes to %f\n", __func__, s->prefetch_sw_bytes);
- }
-
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
-
- if (s->dst_y_prefetch_equ > 1) {
- s->prefetch_bw1 = 0.;
- s->prefetch_bw2 = 0.;
- s->prefetch_bw3 = 0.;
- s->prefetch_bw4 = 0.;
-
- if (s->Tpre_rounded - *p->Tno_bw > 0) {
- s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor
- + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)
- + s->prefetch_sw_bytes)
- / (s->Tpre_rounded - *p->Tno_bw);
- s->Tsw_est1 = s->prefetch_sw_bytes / s->prefetch_bw1;
- } else
- s->prefetch_bw1 = 0;
-
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est1 / s->LineTime < s->min_Lsw_oto) && s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0) {
- s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
- }
-
- if (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded);
- else
- s->prefetch_bw2 = 0;
-
- if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - s->Tvm_trips_rounded);
- s->Tsw_est3 = s->prefetch_sw_bytes / s->prefetch_bw3;
- } else
- s->prefetch_bw3 = 0;
-
-
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est3 / s->LineTime < s->min_Lsw_oto) && s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
- }
-
- if (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw4 = s->prefetch_sw_bytes / (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
- else
- s->prefetch_bw4 = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tpre_rounded: %f\n", __func__, s->Tpre_rounded);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tvm_trips_rounded: %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded: %f\n", __func__, 2 * s->Tr0_trips_rounded);
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
-#endif
-
- {
- bool Case1OK = false;
- bool Case2OK = false;
- bool Case3OK = false;
-
- if (s->prefetch_bw1 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw1 >= s->Tr0_trips_rounded) {
- Case1OK = true;
- }
- }
-
- if (s->prefetch_bw2 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw2 < s->Tr0_trips_rounded) {
- Case2OK = true;
- }
- }
-
- if (s->prefetch_bw3 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3 < s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw3 >= s->Tr0_trips_rounded) {
- Case3OK = true;
- }
- }
-
- if (Case1OK) {
- s->prefetch_bw_equ = s->prefetch_bw1;
- } else if (Case2OK) {
- s->prefetch_bw_equ = s->prefetch_bw2;
- } else if (Case3OK) {
- s->prefetch_bw_equ = s->prefetch_bw3;
- } else {
- s->prefetch_bw_equ = s->prefetch_bw4;
- }
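/*
 * Editor's note, summarizing the selection above: prefetch_bw1 assumes
 * both the VM fetch and the row fetch are bandwidth-limited (their
 * bandwidth-based times cover Tvm_trips_rounded and Tr0_trips_rounded);
 * prefetch_bw2 assumes only the VM fetch is bandwidth-limited and the
 * row fetch is latency-limited; prefetch_bw3 assumes the opposite; and
 * prefetch_bw4 is the fallback where both are latency-limited and only
 * the swath transfer sets the required bandwidth.
 */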
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
-#endif
- s->prefetch_bw_equ = math_max3(s->prefetch_bw_equ,
- p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-
- if (s->prefetch_bw_equ > 0) {
- if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_equ = math_max3(*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_equ, *p->Tvm_trips, s->LineTime / 4);
- } else {
- s->Tvm_equ = s->LineTime / 4;
- }
-
- if (p->display_cfg->gpuvm_enable == true || dcc_mrq_enable || p->setup_for_tdlut) {
- s->Tr0_equ = math_max3((p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_equ, // PixelPTEBytesPerRow is dpte_row_bytes
- *p->Tr0_trips,
- s->LineTime / 4);
- } else {
- s->Tr0_equ = s->LineTime / 4;
- }
- } else {
- s->Tvm_equ = 0;
- s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
-#endif
-
- if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
- *p->dst_y_prefetch = s->dst_y_prefetch_oto;
- s->TimeForFetchingVM = s->Tvm_oto;
- s->TimeForFetchingRowInVBlank = s->Tr0_oto;
-
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto bw scheduling for prefetch\n", __func__);
-#endif
-
- } else {
- *p->dst_y_prefetch = s->dst_y_prefetch_equ;
- s->TimeForFetchingVM = s->Tvm_equ;
- s->TimeForFetchingRowInVBlank = s->Tr0_equ;
-
- if (p->VStartup == p->MaxVStartup) {
- *p->dst_y_per_vm_vblank = math_floor2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_floor2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- } else {
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
-#endif
- }
- dml2_assert(*p->dst_y_prefetch < 64);
-
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
-
- s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
- *p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
-
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
-#endif
- unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
-
- if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
- *p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
- *p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
-#endif
- if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
- if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
- *p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY,
- (double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
- *p->VRatioPrefetchY = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
-#endif
- }
-
- *p->VRatioPrefetchC = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData;
- *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
-#endif
- if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
- if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
- *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
- *p->VRatioPrefetchC = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
-#endif
- }
-
- *p->RequiredPrefetchPixelDataBWLuma = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelY * p->swath_width_luma_ub / s->LineTime;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
-#endif
- *p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: MyErr set, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
-
- } else {
- dml2_printf("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- s->NoTimeToPrefetch = true;
- s->TimeForFetchingVM = 0;
- s->TimeForFetchingRowInVBlank = 0;
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- s->LinesToRequestPrefetchPixelData = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- {
- double prefetch_vm_bw;
- double prefetch_row_bw;
-
- if (vm_bytes == 0) {
- prefetch_vm_bw = 0;
- } else if (*p->dst_y_per_vm_vblank > 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
-#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
-#endif
- } else {
- prefetch_vm_bw = 0;
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
- }
-
- if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
- prefetch_row_bw = 0;
- } else if (*p->dst_y_per_row_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
-#endif
- } else {
- prefetch_row_bw = 0;
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
- }
-
- *p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
- }
-
- if (s->NoTimeToPrefetch) {
- s->TimeForFetchingVM = 0;
- s->TimeForFetchingRowInVBlank = 0;
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- *p->dst_y_prefetch = 0;
- s->LinesToRequestPrefetchPixelData = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
- return s->NoTimeToPrefetch;
-}
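
/*
 * Editor's note: stepping back from the details, the schedule above fits
 * the VM fetch, two row fetches and the swath transfer into whatever part
 * of the vstartup interval is not already consumed by setup, calculation,
 * wait and the post-scaler delay. The sketch below is a rough model of
 * the dst_y_prefetch_equ budget with an invented helper name; it folds
 * the max(TWait_p + TCalc, Tdmdl - Ttrip) wait term into a single
 * parameter and is not a drop-in replacement for the code above.
 */
static double prefetch_lines_budget_example(double vstartup_lines,
					    double tsetup_us,
					    double wait_plus_calc_us,
					    double to_lines,
					    double line_time_us)
{
	double budget_lines = vstartup_lines
			      - (tsetup_us + wait_plus_calc_us) / line_time_us
			      - to_lines;

	/* Clamp to the U6.2 register limit of DST_Y_PREFETCH. */
	if (budget_lines > 63.75)
		budget_lines = 63.75;

	return budget_lines;
}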
-
-static void calculate_peak_bandwidth_required(
- struct dml2_core_internal_scratch *s,
-
- // output
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int inc_flip_bw,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[])
-{
- unsigned int n;
- unsigned int m;
-
- struct dml2_core_shared_calculate_peak_bandwidth_required_locals *l = &s->calculate_peak_bandwidth_required_locals;
-
- memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, NumberOfActiveSurfaces);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- l->unity_array[k] = 1.0;
- l->zero_array[k] = 0.0;
- }
-
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) {
- urg_vactive_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- 0, //inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- l->zero_array, //PrefetchBandwidthLuma,
- l->zero_array, //PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- l->zero_array, //prefetch_cursor_bw,
- l->zero_array, //prefetch_vmrow_bw,
- l->zero_array, //flip_bw,
- UrgentBurstFactorLuma,
- UrgentBurstFactorChroma,
- UrgentBurstFactorCursor,
- UrgentBurstFactorLumaPre,
- UrgentBurstFactorChromaPre,
- UrgentBurstFactorCursorPre);
-
-
- urg_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- PrefetchBandwidthLuma,
- PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- prefetch_cursor_bw,
- prefetch_vmrow_bw,
- flip_bw,
- UrgentBurstFactorLuma,
- UrgentBurstFactorChroma,
- UrgentBurstFactorCursor,
- UrgentBurstFactorLumaPre,
- UrgentBurstFactorChromaPre,
- UrgentBurstFactorCursorPre);
-
- non_urg_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- PrefetchBandwidthLuma,
- PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- prefetch_cursor_bw,
- prefetch_vmrow_bw,
- flip_bw,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), non_urg_bandwidth_required[m][n]);
-#endif
- dml2_assert(urg_bandwidth_required[m][n] >= non_urg_bandwidth_required[m][n]);
- }
- }
-}
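
/*
 * Editor's note: the three calls above differ only in their inputs - the
 * vactive variant zeroes the prefetch, cursor-prefetch, vmrow and flip
 * terms, the urgent variant applies the per-plane urgent burst factors,
 * and the non-urgent variant substitutes unity burst factors. A caller
 * typically compares the resulting [soc state][bw type] matrices against
 * the available-bandwidth matrix; the loop below is a minimal sketch of
 * that comparison with an invented result array name (bw_ok).
 */
static void scan_bandwidth_matrices_example(
	double required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
	double available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
	bool bw_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
{
	for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
		for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++)
			bw_ok[m][n] = required[m][n] <= available[m][n];
	}
}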
-
-static void check_urgent_bandwidth_support(
- double *frac_urg_bandwidth_nom,
- double *frac_urg_bandwidth_mall,
- bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
-
- unsigned int mall_allocated_for_dcn_mbytes,
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- *bandwidth_support_ok = 1;
- *vactive_bandwidth_support_ok = 1;
-
- double frac_urg_bandwidth_nom_sdp = non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] / urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- double frac_urg_bandwidth_nom_dram = non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] / urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- double frac_urg_bandwidth_mall_sdp = non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] / urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- double frac_urg_bandwidth_mall_dram = non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] / urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- // Check urgent bandwidth required at sdp vs urgent bandwidth avail at sdp -> FractionOfUrgentBandwidth
- // Check urgent bandwidth required at dram vs urgent bandwidth avail at dram
- // Check urgent bandwidth required at sdp vs urgent bandwidth avail at sdp, svp_prefetch -> FractionOfUrgentBandwidthMALL
- // Check urgent bandwidth required at dram vs urgent bandwidth avail at dram, svp_prefetch
-
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
-
- if (mall_allocated_for_dcn_mbytes > 0) {
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- }
-
- *frac_urg_bandwidth_nom = math_max2(frac_urg_bandwidth_nom_sdp, frac_urg_bandwidth_nom_dram);
- *frac_urg_bandwidth_mall = math_max2(frac_urg_bandwidth_mall_sdp, frac_urg_bandwidth_mall_dram);
-
- *bandwidth_support_ok &= (*frac_urg_bandwidth_nom <= 1.0);
-
- if (mall_allocated_for_dcn_mbytes > 0)
- *bandwidth_support_ok &= (*frac_urg_bandwidth_mall <= 1.0);
-
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- if (mall_allocated_for_dcn_mbytes > 0) {
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
-
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
-#endif
-
-}
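
/*
 * Editor's note, a numeric illustration of the fractions above (invented
 * numbers): with a non-urgent sys_active demand of 51200 MB/s on SDP
 * against 64000 MB/s available, and 38400 MB/s on DRAM against
 * 48000 MB/s, frac_urg_bandwidth_nom = max(51200/64000, 38400/48000) = 0.8,
 * which satisfies the <= 1.0 check. The svp_prefetch (MALL) fractions are
 * only enforced when mall_allocated_for_dcn_mbytes is non-zero.
 */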
-
-static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max], // no flip
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- double flip_bw_available_mbps;
- double flip_bw_available_sdp_mbps;
- double flip_bw_available_dram_mbps;
-
- flip_bw_available_sdp_mbps = urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp] - urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp];
- flip_bw_available_dram_mbps = urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram] - urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram];
- flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
-#endif
-
- return flip_bw_available_mbps;
-}
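
/*
 * Editor's note: reusing the invented figures from the previous note as
 * the no-flip urgent demand, the helper above would report
 * min(64000 - 51200, 48000 - 38400) = min(12800, 9600) = 9600 MB/s of
 * headroom for immediate flips, i.e. the flip budget is set by whichever
 * of SDP or DRAM has the smaller margin.
 */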
-
-static void calculate_immediate_flip_bandwidth_support(
- // Output
- double *frac_urg_bandwidth_flip,
- bool *flip_bandwidth_support_ok,
-
- // Input
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- double frac_urg_bw_flip_sdp = non_urg_bandwidth_required_flip[eval_state][dml2_core_internal_bw_sdp] / urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp];
- double frac_urg_bw_flip_dram = non_urg_bandwidth_required_flip[eval_state][dml2_core_internal_bw_dram] / urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram];
-
- *flip_bandwidth_support_ok = true;
- for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) { // check sdp and dram
- *flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str((enum dml2_core_internal_bw_type) n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
-#endif
- dml2_assert(urg_bandwidth_required_flip[eval_state][n] > non_urg_bandwidth_required_flip[eval_state][n]);
- }
-
- *frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
- *flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
-
- for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
- __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
- urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
- }
- }
-#endif
-}
-
-static void CalculateFlipSchedule(
- struct dml2_core_internal_scratch *s,
- bool iflip_enable,
- bool use_lb_flip_bw,
- double HostVMInefficiencyFactor,
- double Tvm_trips_flip,
- double Tr0_trips_flip,
- double Tvm_trips_flip_rounded,
- double Tr0_trips_flip_rounded,
- bool GPUVMEnable,
- double vm_bytes, // vm_bytes
- double DPTEBytesPerRow, // dpte_row_bytes
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum dml2_source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw_flip,
- unsigned int dpte_row_height,
- unsigned int dpte_row_height_chroma,
- bool use_one_row_for_frame_flip,
- unsigned int max_flip_time_us,
- unsigned int per_pipe_flip_bytes,
- unsigned int meta_row_bytes,
- unsigned int meta_row_height,
- unsigned int meta_row_height_chroma,
- bool dcc_mrq_enable,
-
- // Output
- double *dst_y_per_vm_flip,
- double *dst_y_per_row_flip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
-{
- struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals;
-
- l->dual_plane = dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
- l->dpte_row_bytes = DPTEBytesPerRow;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
-#endif
-
- if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
- if (l->dual_plane) {
- if (dcc_mrq_enable & GPUVMEnable) {
- l->min_row_height = math_min2(dpte_row_height, meta_row_height);
- l->min_row_height_chroma = math_min2(dpte_row_height_chroma, meta_row_height_chroma);
- } else if (GPUVMEnable) {
- l->min_row_height = dpte_row_height;
- l->min_row_height_chroma = dpte_row_height_chroma;
- } else {
- l->min_row_height = meta_row_height;
- l->min_row_height_chroma = meta_row_height_chroma;
- }
- l->min_row_time = math_min2(l->min_row_height * LineTime / VRatio, l->min_row_height_chroma * LineTime / VRatioChroma);
- } else {
- if (dcc_mrq_enable & GPUVMEnable)
- l->min_row_height = math_min2(dpte_row_height, meta_row_height);
- else if (GPUVMEnable)
- l->min_row_height = dpte_row_height;
- else
- l->min_row_height = meta_row_height;
-
- l->min_row_time = l->min_row_height * LineTime / VRatio;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
-#endif
- dml2_assert(l->min_row_time > 0);
-
- if (use_lb_flip_bw) {
- // For mode check, calculate the flip bw requirement with worst case flip time
- l->max_flip_time = math_min2(l->min_row_time, math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
-
- //The lower bound on flip bandwidth
- // Note: The get_urgent_bandwidth_required already consider dpte_row_bw and meta_row_bw in bandwidth calculation, so leave final_flip_bw = 0 if iflip not required
- l->lb_flip_bw = 0;
-
- if (iflip_enable) {
- l->hvm_scaled_vm_bytes = vm_bytes * HostVMInefficiencyFactor;
- l->num_rows = 2;
- l->hvm_scaled_row_bytes = (l->num_rows * l->dpte_row_bytes * HostVMInefficiencyFactor + l->num_rows * meta_row_bytes);
- l->hvm_scaled_vm_row_bytes = l->hvm_scaled_vm_bytes + l->hvm_scaled_row_bytes;
- l->lb_flip_bw = math_max3(
- l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip),
- l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
- l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
-
- if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- }
-#endif
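-			// Presumably also bounding to the register limits (dst_y_per_vm_flip < 32 lines,
-			// dst_y_per_row_flip < 16 lines, cf. the check in the non-lb path below), hence the
-			// 31 and 15 line-time divisors.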
- l->lb_flip_bw = math_max3(l->lb_flip_bw,
- l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip,
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
-#endif
- }
-
- *final_flip_bw = l->lb_flip_bw;
-
- *dst_y_per_vm_flip = 1; // not used
- *dst_y_per_row_flip = 1; // not used
- *ImmediateFlipSupportedForPipe = true;
- } else {
- if (iflip_enable) {
- l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
-#endif
- if (l->ImmediateFlipBW == 0) {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- } else {
- l->Tvm_flip = math_max3(Tvm_trips_flip,
- Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW,
- LineTime / 4.0);
-
- l->Tr0_flip = math_max3(Tr0_trips_flip,
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW,
- LineTime / 4.0);
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
-
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
-#endif
- *dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
- *dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
-
- *final_flip_bw = math_max2(vm_bytes * HostVMInefficiencyFactor / (*dst_y_per_vm_flip * LineTime),
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (*dst_y_per_row_flip * LineTime));
-
- if (*dst_y_per_vm_flip >= 32 || *dst_y_per_row_flip >= 16 || l->Tvm_flip + 2 * l->Tr0_flip > l->min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
- } else {
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
- } else {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- *dst_y_per_vm_flip = 0;
- *dst_y_per_row_flip = 0;
- *final_flip_bw = 0;
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
- }
- } else {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- *dst_y_per_vm_flip = 0;
- *dst_y_per_row_flip = 0;
- *final_flip_bw = 0;
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- }
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
-#endif
-}
-
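-/*
- * Derive the urgent, p-state (DRAM/FCLK), USR-retraining and stutter watermarks from the
- * SoC latency parameters, then compute per-surface latency-hiding margins (line buffer plus
- * DET buffering, reduced by writeback limits where enabled) to classify DRAM and FCLK
- * clock-change support as vactive, vblank or unsupported, and to size the sub-viewport
- * lines needed in MALL.
- */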
-static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *p)
-{
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals *s = &scratch->CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals;
-
- s->TotalActiveWriteback = 0;
- p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
-#endif
-
- p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
- p->Watermark->DRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->Watermark->UrgentWatermark;
- p->Watermark->FCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->Watermark->UrgentWatermark;
- p->Watermark->StutterExitWatermark = p->mmSOCParameters.SRExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->Z8StutterExitWatermark = p->mmSOCParameters.SRExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->Z8StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
-#endif
-
- s->TotalActiveWriteback = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->TotalActiveWriteback = s->TotalActiveWriteback + 1;
- }
- }
-
- if (s->TotalActiveWriteback <= 1) {
- p->Watermark->WritebackUrgentWatermark = p->mmSOCParameters.WritebackLatency;
- } else {
- p->Watermark->WritebackUrgentWatermark = p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
- }
- if (p->USRRetrainingRequired)
- p->Watermark->WritebackUrgentWatermark = p->Watermark->WritebackUrgentWatermark + p->mmSOCParameters.USRRetrainingLatency;
-
- if (s->TotalActiveWriteback <= 1) {
- p->Watermark->WritebackDRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.WritebackLatency;
- p->Watermark->WritebackFCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->mmSOCParameters.WritebackLatency;
- } else {
- p->Watermark->WritebackDRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
-		p->Watermark->WritebackFCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
- }
-
-	if (p->USRRetrainingRequired) {
-		p->Watermark->WritebackDRAMClockChangeWatermark = p->Watermark->WritebackDRAMClockChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
-		p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
-	}
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
-#endif
-
- s->TotalPixelBW = 0.0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- double h_total = (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- double pixel_clock_mhz = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000.0;
- double v_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double v_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
-
- s->TotalPixelBW = s->TotalPixelBW + p->DPPPerSurface[k]
- * (p->SwathWidthY[k] * p->BytePerPixelDETY[k] * v_ratio + p->SwathWidthC[k] * p->BytePerPixelDETC[k] * v_ratio_c) / (h_total / pixel_clock_mhz);
- }
-
- *p->global_fclk_change_supported = true;
- *p->global_dram_clock_change_supported = true;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- double h_total = (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- double pixel_clock_mhz = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000.0;
- double v_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double v_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- double v_taps = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- double v_taps_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- double h_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio;
- double h_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio;
- double LBBitPerPixel = 57;
-
- s->LBLatencyHidingSourceLinesY[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthY[k] / math_max2(h_ratio, 1.0)), 1)) - (v_taps - 1));
- s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines= %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
-		dml2_printf("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
-#endif
-
- s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
- s->EffectiveLBLatencyHidingC = s->LBLatencyHidingSourceLinesC[k] / v_ratio_c * (h_total / pixel_clock_mhz);
-
- s->EffectiveDETBufferSizeY = p->DETBufferSizeY[k];
- if (p->UnboundedRequestEnabled) {
- s->EffectiveDETBufferSizeY = s->EffectiveDETBufferSizeY + p->CompressedBufferSizeInkByte * 1024 * (p->SwathWidthY[k] * p->BytePerPixelDETY[k] * v_ratio) / (h_total / pixel_clock_mhz) / s->TotalPixelBW;
- }
-
- s->LinesInDETY[k] = (double)s->EffectiveDETBufferSizeY / p->BytePerPixelDETY[k] / p->SwathWidthY[k];
- s->LinesInDETYRoundedDownToSwath[k] = (unsigned int)(math_floor2(s->LinesInDETY[k], p->SwathHeightY[k]));
- s->FullDETBufferingTimeY = s->LinesInDETYRoundedDownToSwath[k] * (h_total / pixel_clock_mhz) / v_ratio;
-
- s->ActiveClockChangeLatencyHidingY = s->EffectiveLBLatencyHidingY + s->FullDETBufferingTimeY - ((double)p->DSTXAfterScaler[k] / h_total + (double)p->DSTYAfterScaler[k]) * h_total / pixel_clock_mhz;
-
- if (p->NumberOfActiveSurfaces > 1) {
- s->ActiveClockChangeLatencyHidingY = s->ActiveClockChangeLatencyHidingY - (1.0 - 1.0 / (double)p->NumberOfActiveSurfaces) * (double)p->SwathHeightY[k] * (double)h_total / pixel_clock_mhz / v_ratio;
- }
-
- if (p->BytePerPixelDETC[k] > 0) {
- s->LinesInDETC[k] = p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k];
- s->LinesInDETCRoundedDownToSwath[k] = (unsigned int)(math_floor2(s->LinesInDETC[k], p->SwathHeightC[k]));
- s->FullDETBufferingTimeC = s->LinesInDETCRoundedDownToSwath[k] * (h_total / pixel_clock_mhz) / v_ratio_c;
- s->ActiveClockChangeLatencyHidingC = s->EffectiveLBLatencyHidingC + s->FullDETBufferingTimeC - ((double)p->DSTXAfterScaler[k] / (double)h_total + (double)p->DSTYAfterScaler[k]) * (double)h_total / pixel_clock_mhz;
- if (p->NumberOfActiveSurfaces > 1) {
- s->ActiveClockChangeLatencyHidingC = s->ActiveClockChangeLatencyHidingC - (1.0 - 1.0 / (double)p->NumberOfActiveSurfaces) * (double)p->SwathHeightC[k] * (double)h_total / pixel_clock_mhz / v_ratio_c;
- }
- s->ActiveClockChangeLatencyHiding = math_min2(s->ActiveClockChangeLatencyHidingY, s->ActiveClockChangeLatencyHidingC);
- } else {
- s->ActiveClockChangeLatencyHiding = s->ActiveClockChangeLatencyHidingY;
- }
-
- s->ActiveDRAMClockChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->DRAMClockChangeWatermark;
- s->ActiveFCLKChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->FCLKChangeWatermark;
- s->USRRetrainingLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->USRRetrainingWatermark;
-
- if (p->VActiveLatencyHidingMargin)
- p->VActiveLatencyHidingMargin[k] = s->ActiveDRAMClockChangeLatencyMargin[k];
-
- p->VActiveLatencyHidingUs[k] = s->ActiveClockChangeLatencyHiding;
-
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0 / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height * (double)h_total / pixel_clock_mhz) * 4.0);
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- s->WritebackLatencyHiding = s->WritebackLatencyHiding / 2;
- }
- s->WritebackDRAMClockChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackDRAMClockChangeWatermark;
-
- s->WritebackFCLKChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackFCLKChangeWatermark;
-
- s->ActiveDRAMClockChangeLatencyMargin[k] = math_min2(s->ActiveDRAMClockChangeLatencyMargin[k], s->WritebackDRAMClockChangeLatencyMargin);
- s->ActiveFCLKChangeLatencyMargin[k] = math_min2(s->ActiveFCLKChangeLatencyMargin[k], s->WritebackFCLKChangeLatencyMargin);
- }
- p->MaxActiveDRAMClockChangeLatencySupported[k] = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 0 : (s->ActiveDRAMClockChangeLatencyMargin[k] + p->mmSOCParameters.DRAMClockChangeLatency);
-
- enum dml2_uclk_pstate_change_strategy uclk_pstate_change_strategy = p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy;
- double reserved_vblank_time_us = (double)p->display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns / 1000;
-
- p->FCLKChangeSupport[k] = dml2_fclock_change_unsupported;
- if (s->ActiveFCLKChangeLatencyMargin[k] > 0)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vactive;
- else if (reserved_vblank_time_us >= p->mmSOCParameters.FCLKChangeLatency)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vblank;
-
- if (p->FCLKChangeSupport[k] == dml2_fclock_change_unsupported)
- *p->global_fclk_change_supported = false;
-
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
- if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
- if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
- else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
- else if (reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
- } else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vactive && s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vblank && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_drr)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_drr;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_svp)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_svp;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_full_frame;
-
- if (p->DRAMClockChangeSupport[k] == dml2_dram_clock_change_unsupported)
- *p->global_dram_clock_change_supported = false;
-
- s->dst_y_pstate = (unsigned int)(math_ceil2((p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.UrgentLatency) / (h_total / pixel_clock_mhz), 1));
- s->src_y_pstate_l = (unsigned int)(math_ceil2(s->dst_y_pstate * v_ratio, p->SwathHeightY[k]));
- s->src_y_ahead_l = (unsigned int)(math_floor2(p->DETBufferSizeY[k] / p->BytePerPixelDETY[k] / p->SwathWidthY[k], p->SwathHeightY[k]) + s->LBLatencyHidingSourceLinesY[k]);
- s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
-		dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
-#endif
- p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
-
- if (p->BytePerPixelDETC[k] > 0) {
- s->src_y_pstate_c = (unsigned int)(math_ceil2(s->dst_y_pstate * v_ratio_c, p->SwathHeightC[k]));
- s->src_y_ahead_c = (unsigned int)(math_floor2(p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k], p->SwathHeightC[k]) + s->LBLatencyHidingSourceLinesC[k]);
- s->sub_vp_lines_c = s->src_y_pstate_c + s->src_y_ahead_c + p->meta_row_height_c[k];
-
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
- p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, 2 * s->sub_vp_lines_c));
- else
- p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
-
-#ifdef __DML_VBA_DEBUG__
-			dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
-#endif
- }
- }
-
- bool FoundCriticalSurface = false;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if ((!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) && ((!FoundCriticalSurface)
- || ((s->ActiveFCLKChangeLatencyMargin[k] + p->mmSOCParameters.FCLKChangeLatency) < *p->MaxActiveFCLKChangeLatencySupported))) {
- FoundCriticalSurface = true;
- *p->MaxActiveFCLKChangeLatencySupported = s->ActiveFCLKChangeLatencyMargin[k] + p->mmSOCParameters.FCLKChangeLatency;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
-#endif
-}
-
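-/* Peak DRAM bandwidth in MB/s for a given UCLK (kHz): uclk * channels * channel width in bytes * transactions per clock. */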
-static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config)
-{
- double bw_mbps = 0;
- bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0;
-
- return bw_mbps;
-}
-
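-/* Inverse conversion: DRAM bandwidth in kB/s back to the UCLK frequency (MHz) that provides it. */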
-static double dram_bw_kbps_to_uclk_mhz(unsigned long long bw_kbps, const struct dml2_dram_params *dram_config)
-{
- double uclk_mhz = 0;
-
- uclk_mhz = (double)bw_kbps / (dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0;
-
- return uclk_mhz;
-}
-
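-/*
- * Pick the QoS parameter set for a given UCLK: return the index of the last entry whose
- * minimum_uclk_khz does not exceed uclk_freq_khz (the table is expected to be sorted by
- * ascending minimum_uclk_khz and terminated by a zero entry).
- */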
-static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params)
-{
- unsigned int i;
- unsigned int index = 0;
-
- for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
-
- if (i == 0)
- index = 0;
- else
- index = i - 1;
-
- if (uclk_freq_khz < per_uclk_dpm_params[i].minimum_uclk_khz ||
- per_uclk_dpm_params[i].minimum_uclk_khz == 0) {
- break;
- }
- }
-#if defined(__DML_VBA_DEBUG__)
-	dml2_printf("DML::%s: uclk_freq_khz = %lu\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
- return index;
-}
-
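-/* Return the DPM index whose UCLK value matches uclk_freq_khz exactly; assert if it is not in the table. */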
-static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
-{
- unsigned int i;
- bool clk_entry_found = 0;
-
- for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
-
- if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
- break;
- }
- }
-
- dml2_assert(clk_entry_found);
-#if defined(__DML_VBA_DEBUG__)
-	dml2_printf("DML::%s: uclk_freq_khz = %lu\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
-#endif
- return i;
-}
-
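-/*
- * Bytes a pipe must fetch for an immediate flip: the frame's vm (page table) bytes and two
- * rows of PTEs, both scaled by the host-VM inefficiency factor, plus two rows of DCC
- * metadata.
- */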
-static unsigned int get_pipe_flip_bytes(
- double hostvm_inefficiency_factor,
- unsigned int vm_bytes,
- unsigned int dpte_row_bytes,
- unsigned int meta_row_bytes)
-{
- unsigned int flip_bytes = 0;
-
- flip_bytes += (unsigned int)((vm_bytes * hostvm_inefficiency_factor) + 2 * meta_row_bytes);
- flip_bytes += (unsigned int)(2 * dpte_row_bytes * hostvm_inefficiency_factor);
-
- return flip_bytes;
-}
-
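-/*
- * The host-VM inefficiency factor is the ratio of urgent bandwidth available to pixel+vm
- * traffic versus vm-only traffic when both GPUVM and HostVM are enabled (1.0 otherwise).
- * The prefetch variant is floored at 4 while the remote IOMMU has fewer outstanding
- * translations than the maximum outstanding requests.
- */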
-static void calculate_hostvm_inefficiency_factor(
- double *HostVMInefficiencyFactor,
- double *HostVMInefficiencyFactorPrefetch,
-
- bool gpuvm_enable,
- bool hostvm_enable,
- unsigned int remote_iommu_outstanding_translations,
- unsigned int max_outstanding_reqs,
- double urg_bandwidth_avail_active_pixel_and_vm,
- double urg_bandwidth_avail_active_vm_only)
-{
- *HostVMInefficiencyFactor = 1;
- *HostVMInefficiencyFactorPrefetch = 1;
-
- if (gpuvm_enable && hostvm_enable) {
- *HostVMInefficiencyFactor = urg_bandwidth_avail_active_pixel_and_vm / urg_bandwidth_avail_active_vm_only;
- *HostVMInefficiencyFactorPrefetch = *HostVMInefficiencyFactor;
-
- if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
- *HostVMInefficiencyFactorPrefetch = 4;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
-#endif
- }
-}
-
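-/*
- * Per-surface line and request delivery times for the active and prefetch phases. When a
- * plane is not vertically downscaled (v_ratio <= 1) delivery is paced by the display side
- * (swath width over h_ratio at pixel clock, times DPPs used); otherwise it is paced by the
- * DPP scaler throughput at DPPCLK. Request delivery times are the line delivery times
- * divided by the requests per swath.
- */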
-static void CalculatePixelDeliveryTimes(
- const struct dml2_display_cfg *display_cfg,
- const struct core_display_cfg_support_info *cfg_support_info,
- unsigned int NumberOfActiveSurfaces,
- double VRatioPrefetchY[],
- double VRatioPrefetchC[],
- unsigned int swath_width_luma_ub[],
- unsigned int swath_width_chroma_ub[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- unsigned int BytePerPixelC[],
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
-
- // Output
- double DisplayPipeLineDeliveryTimeLuma[],
- double DisplayPipeLineDeliveryTimeChroma[],
- double DisplayPipeLineDeliveryTimeLumaPrefetch[],
- double DisplayPipeLineDeliveryTimeChromaPrefetch[],
- double DisplayPipeRequestDeliveryTimeLuma[],
- double DisplayPipeRequestDeliveryTimeChroma[],
- double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
- double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
-{
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
-#endif
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
-
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChroma[k] = 0;
- } else {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-
- if (VRatioPrefetchY[k] <= 1) {
- DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
-
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
- } else {
- if (VRatioPrefetchC[k] <= 1) {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
-#endif
- }
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-
- DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub_l[k];
- DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub_l[k];
- if (BytePerPixelC[k] == 0) {
- DisplayPipeRequestDeliveryTimeChroma[k] = 0;
- DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
- } else {
- DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub_c[k];
- DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
- }
-}
-
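-/*
- * Time budgets for issuing DCC metadata chunks and PTE groups: for each surface, compute
- * the time per metadata chunk and per PTE group available in the nominal (vactive), vblank
- * and flip cases from the row heights, v_ratio and line time, plus the time per 3D LUT
- * (tdlut) group when one is configured.
- */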
-static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTETimes_params *p)
-{
- unsigned int meta_chunk_width;
- unsigned int min_meta_chunk_width;
- unsigned int meta_chunk_per_row_int;
- unsigned int meta_row_remainder;
- unsigned int meta_chunk_threshold;
- unsigned int meta_chunks_per_row_ub;
- unsigned int meta_chunk_width_chroma;
- unsigned int min_meta_chunk_width_chroma;
- unsigned int meta_chunk_per_row_int_chroma;
- unsigned int meta_row_remainder_chroma;
- unsigned int meta_chunk_threshold_chroma;
- unsigned int meta_chunks_per_row_ub_chroma;
- unsigned int dpte_group_width_luma;
- unsigned int dpte_groups_per_row_luma_ub;
- unsigned int dpte_group_width_chroma;
- unsigned int dpte_groups_per_row_chroma_ub;
- double pixel_clock_mhz;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DST_Y_PER_PTE_ROW_NOM_L[k] = p->dpte_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = p->dpte_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- p->DST_Y_PER_META_ROW_NOM_L[k] = p->meta_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_META_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_META_ROW_NOM_C[k] = p->meta_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->plane_descriptors[k].surface.dcc.enable == true && p->mrq_present) {
- meta_chunk_width = p->MetaChunkSize * 1024 * 256 / p->BytePerPixelY[k] / p->meta_row_height[k];
- min_meta_chunk_width = p->MinMetaChunkSizeBytes * 256 / p->BytePerPixelY[k] / p->meta_row_height[k];
- meta_chunk_per_row_int = p->meta_row_width[k] / meta_chunk_width;
- meta_row_remainder = p->meta_row_width[k] % meta_chunk_width;
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- meta_chunk_threshold = 2 * min_meta_chunk_width - p->meta_req_width[k];
- } else {
- meta_chunk_threshold = 2 * min_meta_chunk_width - p->meta_req_height[k];
- }
- if (meta_row_remainder <= meta_chunk_threshold) {
- meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
- } else {
- meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
- }
- p->TimePerMetaChunkNominal[k] = p->meta_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio *
- p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- p->TimePerMetaChunkVBlank[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- p->TimePerMetaChunkFlip[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- if (p->BytePerPixelC[k] == 0) {
- p->TimePerChromaMetaChunkNominal[k] = 0;
- p->TimePerChromaMetaChunkVBlank[k] = 0;
- p->TimePerChromaMetaChunkFlip[k] = 0;
- } else {
- meta_chunk_width_chroma = p->MetaChunkSize * 1024 * 256 / p->BytePerPixelC[k] / p->meta_row_height_chroma[k];
- min_meta_chunk_width_chroma = p->MinMetaChunkSizeBytes * 256 / p->BytePerPixelC[k] / p->meta_row_height_chroma[k];
- meta_chunk_per_row_int_chroma = (unsigned int)((double)p->meta_row_width_chroma[k] / meta_chunk_width_chroma);
- meta_row_remainder_chroma = p->meta_row_width_chroma[k] % meta_chunk_width_chroma;
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - p->meta_req_width_chroma[k];
- } else {
- meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - p->meta_req_height_chroma[k];
- }
- if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
- meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
- } else {
- meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
- }
- p->TimePerChromaMetaChunkNominal[k] = p->meta_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- p->TimePerChromaMetaChunkVBlank[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- p->TimePerChromaMetaChunkFlip[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- }
- } else {
- p->TimePerMetaChunkNominal[k] = 0;
- p->TimePerMetaChunkVBlank[k] = 0;
- p->TimePerMetaChunkFlip[k] = 0;
- p->TimePerChromaMetaChunkNominal[k] = 0;
- p->TimePerChromaMetaChunkVBlank[k] = 0;
- p->TimePerChromaMetaChunkFlip[k] = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
-#endif
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DST_Y_PER_PTE_ROW_NOM_L[k] = p->dpte_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = p->dpte_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- pixel_clock_mhz = ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- if (p->display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- p->time_per_tdlut_group[k] = 2 * p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / p->tdlut_groups_per_2row_ub[k];
- else
- p->time_per_tdlut_group[k] = 0;
-
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
-
- if (p->display_cfg->gpuvm_enable == true) {
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- dpte_group_width_luma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeY[k] * p->PixelPTEReqWidthY[k]);
- } else {
- dpte_group_width_luma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeY[k] * p->PixelPTEReqHeightY[k]);
- }
- if (p->use_one_row_for_frame[k]) {
- dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma / 2.0, 1.0));
- } else {
- dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
- }
-
- if (dpte_groups_per_row_luma_ub <= 2) {
- dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
- }
-
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
-
- p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- p->time_per_pte_group_flip_luma[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- if (p->BytePerPixelC[k] == 0) {
- p->time_per_pte_group_nom_chroma[k] = 0;
- p->time_per_pte_group_vblank_chroma[k] = 0;
- p->time_per_pte_group_flip_chroma[k] = 0;
- } else {
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- dpte_group_width_chroma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeC[k] * p->PixelPTEReqWidthC[k]);
- } else {
- dpte_group_width_chroma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeC[k] * p->PixelPTEReqHeightC[k]);
- }
-
- if (p->use_one_row_for_frame[k]) {
- dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma / 2.0, 1.0));
- } else {
- dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
- }
- if (dpte_groups_per_row_chroma_ub <= 2) {
- dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
- }
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
-
- p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- p->time_per_pte_group_flip_chroma[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- }
- } else {
- p->time_per_pte_group_nom_luma[k] = 0;
- p->time_per_pte_group_vblank_luma[k] = 0;
- p->time_per_pte_group_flip_luma[k] = 0;
- p->time_per_pte_group_nom_chroma[k] = 0;
- p->time_per_pte_group_vblank_chroma[k] = 0;
- p->time_per_pte_group_flip_chroma[k] = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
-
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
-#endif
- }
-} // CalculateMetaAndPTETimes
-
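-/*
- * Time available per VM group and per 64-byte VM request during vblank and during an
- * immediate flip, derived from how many page-directory, metadata-PTE and (for prefetch
- * only) tdlut-PTE groups a lower VM stage has to fetch within the dst_y_per_vm budget.
- */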
-static void CalculateVMGroupAndRequestTimes(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelC[],
- double dst_y_per_vm_vblank[],
- double dst_y_per_vm_flip[],
- unsigned int dpte_row_width_luma_ub[],
- unsigned int dpte_row_width_chroma_ub[],
- unsigned int vm_group_bytes[],
- unsigned int dpde0_bytes_per_frame_ub_l[],
- unsigned int dpde0_bytes_per_frame_ub_c[],
- unsigned int tdlut_pte_bytes_per_frame[],
- unsigned int meta_pte_bytes_per_frame_ub_l[],
- unsigned int meta_pte_bytes_per_frame_ub_c[],
- bool mrq_present,
-
- // Output
- double TimePerVMGroupVBlank[],
- double TimePerVMGroupFlip[],
- double TimePerVMRequestVBlank[],
- double TimePerVMRequestFlip[])
-{
- unsigned int num_group_per_lower_vm_stage = 1;
- unsigned int num_req_per_lower_vm_stage = 1;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
-#endif
-
- if (display_cfg->gpuvm_enable) {
- if (display_cfg->gpuvm_max_page_table_levels >= 2) {
- num_group_per_lower_vm_stage += (unsigned int)math_ceil2((double)(dpde0_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1);
-
- if (BytePerPixelC[k] > 0)
- num_group_per_lower_vm_stage += (unsigned int)math_ceil2((double)(dpde0_bytes_per_frame_ub_c[k]) / (double)(vm_group_bytes[k]), 1);
- }
-
- if (dcc_mrq_enable) {
- if (BytePerPixelC[k] > 0) {
- num_group_per_lower_vm_stage += (unsigned int)(2.0 /*for each mpde0 group*/ + math_ceil2((double)(meta_pte_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1) +
- math_ceil2((double)(meta_pte_bytes_per_frame_ub_c[k]) / (double)(vm_group_bytes[k]), 1));
- } else {
- num_group_per_lower_vm_stage += (unsigned int)(1.0 + math_ceil2((double)(meta_pte_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1));
- }
- }
-
- unsigned int num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage;
- unsigned int num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage;
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && display_cfg->gpuvm_enable) {
-				num_group_per_lower_vm_stage_pref += (unsigned int)math_ceil2((double)tdlut_pte_bytes_per_frame[k] / vm_group_bytes[k], 1);
- if (display_cfg->gpuvm_max_page_table_levels >= 2)
- num_group_per_lower_vm_stage_pref += 1; // tdpe0 group
- }
-
- if (display_cfg->gpuvm_max_page_table_levels >= 2) {
- num_req_per_lower_vm_stage += dpde0_bytes_per_frame_ub_l[k] / 64;
- if (BytePerPixelC[k] > 0)
-					num_req_per_lower_vm_stage += dpde0_bytes_per_frame_ub_c[k] / 64;
- }
-
- if (dcc_mrq_enable) {
- num_req_per_lower_vm_stage += meta_pte_bytes_per_frame_ub_l[k] / 64;
- if (BytePerPixelC[k] > 0)
- num_req_per_lower_vm_stage += meta_pte_bytes_per_frame_ub_c[k] / 64;
- }
-
- unsigned int num_req_per_lower_vm_stage_flip = num_req_per_lower_vm_stage;
- unsigned int num_req_per_lower_vm_stage_pref = num_req_per_lower_vm_stage;
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && display_cfg->gpuvm_enable) {
- num_req_per_lower_vm_stage_pref += tdlut_pte_bytes_per_frame[k] / 64;
- }
-
- double line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
-
- if (num_group_per_lower_vm_stage_flip <= 2) {
- num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
- }
-
- if (num_group_per_lower_vm_stage_pref <= 2) {
- num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
- }
-
- TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
- TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
- TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
- TimePerVMRequestFlip[k] = dst_y_per_vm_flip[k] * line_time / num_req_per_lower_vm_stage_flip;
-
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
-			dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %u\n", __func__, k, num_group_per_lower_vm_stage_pref);
-			dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %u\n", __func__, k, num_group_per_lower_vm_stage_flip);
-			dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %u\n", __func__, k, num_req_per_lower_vm_stage_pref);
-			dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %u\n", __func__, k, num_req_per_lower_vm_stage_flip);
-
- if (display_cfg->gpuvm_max_page_table_levels > 2) {
- TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
- TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
- TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
- TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
- }
-
- } else {
- TimePerVMGroupVBlank[k] = 0;
- TimePerVMGroupFlip[k] = 0;
- TimePerVMRequestVBlank[k] = 0;
- TimePerVMRequestFlip[k] = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
-#endif
- }
-}
-
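-/*
- * Stutter efficiency: aggregate per-surface compressed, zero-size-request and row read
- * bandwidths to derive the average DCC compression rate and zero-size fraction, then size
- * the effective compressed (ROB/metadata) buffer used by the stutter efficiency calculation.
- */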
-static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateStutterEfficiency_params *p)
-{
- struct dml2_core_calcs_CalculateStutterEfficiency_locals *l = &scratch->CalculateStutterEfficiency_locals;
-
- memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- if (p->display_cfg->plane_descriptors[k].surface.dcc.enable == true) {
- if ((dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockWidth256BytesY[k] > p->SwathHeightY[k]) || (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockHeight256BytesY[k] > p->SwathHeightY[k]) || p->DCCYMaxUncompressedBlock[k] < 256) {
- l->MaximumEffectiveCompressionLuma = 2;
- } else {
- l->MaximumEffectiveCompressionLuma = 4;
- }
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
-#endif
- l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
- l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
-
- if (p->ReadBandwidthSurfaceChroma[k] > 0) {
- if ((dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockWidth256BytesC[k] > p->SwathHeightC[k]) || (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockHeight256BytesC[k] > p->SwathHeightC[k]) || p->DCCCMaxUncompressedBlock[k] < 256) {
- l->MaximumEffectiveCompressionChroma = 2;
- } else {
- l->MaximumEffectiveCompressionChroma = 4;
- }
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
-#endif
- l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
- l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
- }
- } else {
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] + p->ReadBandwidthSurfaceChroma[k];
- }
- l->TotalRowReadBandwidth = l->TotalRowReadBandwidth + p->DPPPerSurface[k] * (p->meta_row_bw[k] + p->dpte_row_bw[k]);
- }
- }
-
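- // Average DCC compression rate = total uncompressed read bandwidth / total compressed read bandwidth;
- // the zero-size fraction is the share of the total read bandwidth made up of zero-size (fully compressed) requests.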
- l->AverageDCCCompressionRate = p->TotalDataReadBandwidth / l->TotalCompressedReadBandwidth;
- l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
-
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
-#endif
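- // The effective compressed buffer size depends on how much of the traffic is zero-size requests:
- // all zero-size: only the meta FIFO and the zero-size buffer entries contribute;
- // mixed: the compressed buffer / meta FIFO term is combined with the ROB term (compressed only when rob_alloc_compressed);
- // no zero-size: the compressed buffer and meta FIFO scale with the average DCC compression rate, and the ROB does so only when rob_alloc_compressed.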
- if (l->AverageDCCZeroSizeFraction == 1) {
- l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
- l->EffectiveCompressedBufferSize = (double)p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageZeroSizeCompressionRate + ((double)p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 * l->AverageZeroSizeCompressionRate;
-
-
- } else if (l->AverageDCCZeroSizeFraction > 0) {
- l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
- l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
- (double)p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate)) +
- (p->rob_alloc_compressed ? math_min2(((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * l->AverageDCCCompressionRate,
- ((double)p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate))
- : ((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
-
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
-#endif
- } else {
- l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
- (double)p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate) +
- ((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
-#endif
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
-#endif
-
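- // The critical surface is the non-phantom surface with the smallest DET buffering time; that time becomes the stutter period.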
- bool FoundCriticalSurface = false;
- *p->StutterPeriod = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- l->LinesInDETY = ((double)p->DETBufferSizeY[k] + (p->UnboundedRequestEnabled == true ? l->EffectiveCompressedBufferSize : 0) * p->ReadBandwidthSurfaceLuma[k] / p->TotalDataReadBandwidth) / p->BytePerPixelDETY[k] / p->SwathWidthY[k];
- l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
- l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
-#endif
-
- if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
- bool isInterlaceTiming = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !p->ProgressiveToInterlaceUnitInOPP;
-
- FoundCriticalSurface = true;
- *p->StutterPeriod = l->DETBufferingTimeY;
- l->FrameTimeCriticalSurface = (isInterlaceTiming ? math_floor2((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total / 2.0, 1.0) : p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total) * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- l->VActiveTimeCriticalSurface = (isInterlaceTiming ? math_floor2((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_active / 2.0, 1.0) : p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_active) * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- l->BytePerPixelYCriticalSurface = p->BytePerPixelY[k];
- l->SwathWidthYCriticalSurface = p->SwathWidthY[k];
- l->SwathHeightYCriticalSurface = p->SwathHeightY[k];
- l->BlockWidth256BytesYCriticalSurface = p->BlockWidth256BytesY[k];
- l->DETBufferSizeYCriticalSurface = p->DETBufferSizeY[k];
- l->MinTTUVBlankCriticalSurface = p->MinTTUVBlank[k];
- l->SinglePlaneCriticalSurface = (p->ReadBandwidthSurfaceChroma[k] == 0);
- l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
-#endif
- }
- }
- }
-
- // For bounded requests, the stutter period is calculated from the DET size only, even though during the burst some data can also be returned from the ROB/compressed buffer:
- //   if (cdb + rob >= det) the stutter burst is fully absorbed by the cdb + rob, which sit before the decompressor
- //   else
- //     the cdb + rob part returns at the compressed rate using the urgent bandwidth (ideal bandwidth)
- //     the det part returns at the uncompressed rate of 64B/dcfclk
- //
- // For unbounded requests, the stutter period should be calculated as the total of CDB+ROB+DET, so the term "PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer"
- // should equal EffectiveCompressedBufferSize, which returns at the compressed rate; the rest of the stutter period comes from the DET and returns at the uncompressed rate of 64B/dcfclk
-
- l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
-#endif
-
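- // Stutter burst time = the portion of the burst that fits in the ROB/compressed buffer, returned at the compressed rate,
- // plus the remainder returned at the faster of 64B/dcfclk and the compressed return rate,
- // plus the row (PTE/meta) reads for the period at the raw return bandwidth.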
- l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
- / (p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
- (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer)
- / math_max2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
- *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
-#endif
-
- l->TotalActiveWriteback = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- l->TotalActiveWriteback = l->TotalActiveWriteback + 1;
- }
- }
-
- if (l->TotalActiveWriteback == 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
-#endif
- *p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
- *p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
- *p->NumberOfStutterBurstsPerFrame = (*p->StutterEfficiencyNotIncludingVBlank > 0 ? (unsigned int)(math_ceil2(l->VActiveTimeCriticalSurface / *p->StutterPeriod, 1)) : 0);
- *p->Z8NumberOfStutterBurstsPerFrame = (*p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? (unsigned int)(math_ceil2(l->VActiveTimeCriticalSurface / *p->StutterPeriod, 1)) : 0);
- } else {
- *p->StutterEfficiencyNotIncludingVBlank = 0.;
- *p->Z8StutterEfficiencyNotIncludingVBlank = 0.;
- *p->NumberOfStutterBurstsPerFrame = 0;
- *p->Z8NumberOfStutterBurstsPerFrame = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
-#endif
-
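- // The full-frame efficiency below (which credits vblank time) is only applied when all active OTGs use identical
- // timings and are either synchronized or there is a single OTG; otherwise the not-including-vblank value is reported.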
- unsigned int TotalNumberOfActiveOTG = 0;
- double SinglePixelClock = 0;
- unsigned int SingleHTotal = 0;
- unsigned int SingleVTotal = 0;
- bool SameTiming = true;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- if (p->display_cfg->plane_descriptors[k].stream_index == k) {
- if (TotalNumberOfActiveOTG == 0) {
- SinglePixelClock = ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- SingleHTotal = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- SingleVTotal = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total;
- } else if (SinglePixelClock != ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) || SingleHTotal != p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total || SingleVTotal != p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total) {
- SameTiming = false;
- }
- TotalNumberOfActiveOTG = TotalNumberOfActiveOTG + 1;
- }
- }
- }
-
- if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
- if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
- *p->StutterEfficiency = *p->StutterEfficiencyNotIncludingVBlank;
- } else {
- *p->StutterEfficiency = (1 - (*p->NumberOfStutterBurstsPerFrame * p->SRExitTime + l->StutterBurstTime * l->VActiveTimeCriticalSurface / *p->StutterPeriod) / l->FrameTimeCriticalSurface) * 100;
- }
- } else {
- *p->StutterEfficiency = 0;
- *p->NumberOfStutterBurstsPerFrame = 0;
- }
-
- double LastZ8StutterPeriod = 0.0;
-
- if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
- if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
- *p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
- } else {
- *p->Z8StutterEfficiency = (1 - (*p->Z8NumberOfStutterBurstsPerFrame * p->SRExitZ8Time + l->StutterBurstTime * l->VActiveTimeCriticalSurface / *p->StutterPeriod) / l->FrameTimeCriticalSurface) * 100;
- }
- } else {
- *p->Z8StutterEfficiency = 0.;
- *p->Z8NumberOfStutterBurstsPerFrame = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
-#endif
-
-
- unsigned int SwathSizeCriticalSurface;
- unsigned int LastChunkOfSwathSize;
- unsigned int MissingPartOfLastSwathOfDETSize;
-
- SwathSizeCriticalSurface = (unsigned int)(l->BytePerPixelYCriticalSurface * l->SwathHeightYCriticalSurface * math_ceil2(l->SwathWidthYCriticalSurface, l->BlockWidth256BytesYCriticalSurface));
- LastChunkOfSwathSize = SwathSizeCriticalSurface % (p->PixelChunkSizeInKByte * 1024);
- MissingPartOfLastSwathOfDETSize = (unsigned int)(math_ceil2(l->DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface) - l->DETBufferSizeYCriticalSurface);
-
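- // Max-cap mode is disabled only for the bounded-request, single-plane, single-pipe case where the last (partial)
- // chunk of the critical swath can cover the missing tail of the last swath that fits in the DET.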
- *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface && (LastChunkOfSwathSize > 0) &&
- (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0) && (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SwathSizeCriticalSurface = %u\n", __func__, SwathSizeCriticalSurface);
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: LastChunkOfSwathSize = %u\n", __func__, LastChunkOfSwathSize);
- dml2_printf("DML::%s: MissingPartOfLastSwathOfDETSize = %u\n", __func__, MissingPartOfLastSwathOfDETSize);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
-#endif
-}
-
-bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_ex *in_out_params)
-{
- const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
- const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
- const struct core_display_cfg_support_info *cfg_support_info = in_out_params->cfg_support_info;
- struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
- struct dml2_display_cfg_programming *programming = in_out_params->programming;
-
- struct dml2_core_calcs_mode_programming_locals *s = &mode_lib->scratch.dml_core_mode_programming_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculateStutterEfficiency_params *CalculateStutterEfficiency_params = &mode_lib->scratch.CalculateStutterEfficiency_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
- struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
- struct dml2_core_shared_CalculateMetaAndPTETimes_params *CalculateMetaAndPTETimes_params = &mode_lib->scratch.CalculateMetaAndPTETimes_params;
-
- unsigned int j, k;
-
- dml2_printf("DML::%s: --- START --- \n", __func__);
-
- memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
-
- s->num_active_planes = display_cfg->num_planes;
- get_stream_output_bpp(s->OutputBpp, display_cfg);
-
- mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
- dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
-
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
-
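- // Translate the validated ODM configuration into DML ODM modes: odms_used selects combine 4:1/3:1/2:1,
- // otherwise num_odm_output_segments selects MSO 1:4, 1:2 or bypass.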
- for (k = 0; k < s->num_active_planes; ++k) {
- unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- dml2_assert(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
- cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
- cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
-
- if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
-
- switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
- case (4):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_4to1;
- break;
- case (3):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_3to1;
- break;
- case (2):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_2to1;
- break;
- default:
- if (cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4)
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_mso_1to4;
- else if (cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2)
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_mso_1to2;
- else
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_bypass;
- break;
- }
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dppclk[k] > 0);
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
- }
-
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
-
- dml2_assert(mode_lib->mp.Dcfclk > 0);
- dml2_assert(mode_lib->mp.FabricClock > 0);
- dml2_assert(mode_lib->mp.dram_bw_mbps > 0);
- dml2_assert(mode_lib->mp.uclk_freq_mhz > 0);
- dml2_assert(mode_lib->mp.GlobalDPPCLK > 0);
- dml2_assert(mode_lib->mp.Dispclk > 0);
- dml2_assert(mode_lib->mp.DCFCLKDeepSleep > 0);
- dml2_assert(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
- for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
- for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- }
-
- for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-#endif
-
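- // Derive the maximum total DET, the nominal per-pipe DET and the minimum compressed buffer size from the return buffer configuration.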
- CalculateMaxDETAndMinCompressedBufferSize(
- mode_lib->ip.config_return_buffer_size_in_kbytes,
- mode_lib->ip.config_return_buffer_segment_size_in_kbytes,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->ip.max_num_dpp,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.enable,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.value,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- &s->MaxTotalDETInKByte,
- &s->NomDETInKByte,
- &s->MinCompressedBufferSizeInKByte);
-
-
- PixelClockAdjustmentForProgressiveToInterlaceUnit(display_cfg, mode_lib->ip.ptoi_supported, s->PixelClockBackEnd);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateSinglePipeDPPCLKAndSCLThroughput(
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ip.max_dchub_pscl_bw_pix_per_clk,
- mode_lib->ip.max_pscl_lb_bw_pix_per_clk,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps,
-
- /* Output */
- &mode_lib->mp.PSCL_THROUGHPUT[k],
- &mode_lib->mp.PSCL_THROUGHPUT_CHROMA[k],
- &mode_lib->mp.DPPCLKUsingSingleDPP[k]);
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateBytePerPixelAndBlockSizes(
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.tiling,
- display_cfg->plane_descriptors[k].surface.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.pitch,
-
- // Output
- &mode_lib->mp.BytePerPixelY[k],
- &mode_lib->mp.BytePerPixelC[k],
- &mode_lib->mp.BytePerPixelInDETY[k],
- &mode_lib->mp.BytePerPixelInDETC[k],
- &mode_lib->mp.Read256BlockHeightY[k],
- &mode_lib->mp.Read256BlockHeightC[k],
- &mode_lib->mp.Read256BlockWidthY[k],
- &mode_lib->mp.Read256BlockWidthC[k],
- &mode_lib->mp.MacroTileHeightY[k],
- &mode_lib->mp.MacroTileHeightC[k],
- &mode_lib->mp.MacroTileWidthY[k],
- &mode_lib->mp.MacroTileWidthC[k],
- &mode_lib->mp.surf_linear128_l[k],
- &mode_lib->mp.surf_linear128_c[k]);
- }
-
- CalculateSwathWidth(
- display_cfg,
- false, // ForceSingleDPP
- s->num_active_planes,
- mode_lib->mp.ODMMode,
- mode_lib->mp.BytePerPixelY,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.Read256BlockHeightY,
- mode_lib->mp.Read256BlockHeightC,
- mode_lib->mp.Read256BlockWidthY,
- mode_lib->mp.Read256BlockWidthC,
- mode_lib->mp.surf_linear128_l,
- mode_lib->mp.surf_linear128_c,
- mode_lib->mp.NoOfDPP,
-
- /* Output */
- mode_lib->mp.req_per_swath_ub_l,
- mode_lib->mp.req_per_swath_ub_c,
- mode_lib->mp.SwathWidthSingleDPPY,
- mode_lib->mp.SwathWidthSingleDPPC,
- mode_lib->mp.SwathWidthY,
- mode_lib->mp.SwathWidthC,
- s->dummy_integer_array[0], // unsigned int MaximumSwathHeightY[]
- s->dummy_integer_array[1], // unsigned int MaximumSwathHeightC[]
- mode_lib->mp.swath_width_luma_ub,
- mode_lib->mp.swath_width_chroma_ub);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width * display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
- mode_lib->mp.SurfaceReadBandwidthLuma[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->mp.SurfaceReadBandwidthChroma[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
- }
-
- CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSizeInKByte = mode_lib->ip.config_return_buffer_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->MaxTotalDETInKByte = s->MaxTotalDETInKByte;
- CalculateSwathAndDETConfiguration_params->MinCompressedBufferSizeInKByte = s->MinCompressedBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateSwathAndDETConfiguration_params->pixel_chunk_size_kbytes = mode_lib->ip.pixel_chunk_size_kbytes;
-
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = false;
- CalculateSwathAndDETConfiguration_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateSwathAndDETConfiguration_params->nomDETInKByte = s->NomDETInKByte;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = s->dummy_single_array[0];
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = s->dummy_single_array[1];
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->mp.Read256BlockHeightY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightC = mode_lib->mp.Read256BlockHeightC;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthY = mode_lib->mp.Read256BlockWidthY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthC = mode_lib->mp.Read256BlockWidthC;
- CalculateSwathAndDETConfiguration_params->surf_linear128_l = mode_lib->mp.surf_linear128_l;
- CalculateSwathAndDETConfiguration_params->surf_linear128_c = mode_lib->mp.surf_linear128_c;
- CalculateSwathAndDETConfiguration_params->ODMMode = mode_lib->mp.ODMMode;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = mode_lib->mp.NoOfDPP;
- CalculateSwathAndDETConfiguration_params->BytePerPixY = mode_lib->mp.BytePerPixelY;
- CalculateSwathAndDETConfiguration_params->BytePerPixC = mode_lib->mp.BytePerPixelC;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETC = mode_lib->mp.BytePerPixelInDETC;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = mode_lib->mp.req_per_swath_ub_l;
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = mode_lib->mp.req_per_swath_ub_c;
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = s->dummy_long_array[0];
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = s->dummy_long_array[1];
- CalculateSwathAndDETConfiguration_params->SwathWidth = s->dummy_long_array[2];
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = s->dummy_long_array[3];
- CalculateSwathAndDETConfiguration_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateSwathAndDETConfiguration_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = mode_lib->mp.request_size_bytes_luma;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = mode_lib->mp.request_size_bytes_chroma;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = mode_lib->mp.DETBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &mode_lib->mp.UnboundedRequestEnabled;
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = &mode_lib->mp.compbuf_reserved_space_64b;
- CalculateSwathAndDETConfiguration_params->hw_debug5 = &mode_lib->mp.hw_debug5;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = &s->dummy_boolean_array[0][0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &s->dummy_boolean[0];
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
- // VBA_DELTA
- // Calculate DET size, swath height here. In VBA, they are calculated in mode check stage
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- // DSCCLK
- /*
- s->DSCFormatFactor = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if ((display_cfg->plane_descriptors[k].stream_index != k) || !cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable) {
- } else {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420)
- s->DSCFormatFactor = 2;
- else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_444)
- s->DSCFormatFactor = 1;
- else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)
- s->DSCFormatFactor = 2;
- else
- s->DSCFormatFactor = 1;
-
- s->PixelClockBackEndFactor = 3.0;
-
- if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->PixelClockBackEndFactor = 12.0;
- else if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->PixelClockBackEndFactor = 9.0;
- else if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->PixelClockBackEndFactor = 6.0;
-
- }
- #ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DSCEnabled = %u\n", __func__, k, cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable);
- dml2_printf("DML::%s: k=%u, BlendingAndTiming = %u\n", __func__, k, display_cfg->plane_descriptors[k].stream_index);
- dml2_printf("DML::%s: k=%u, PixelClockBackEndFactor = %f\n", __func__, k, s->PixelClockBackEndFactor);
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLK = %f\n", __func__, k, mode_lib->mp.DSCCLK[k]);
- #endif
- }
- */
-
- // DSC Delay
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.DSCDelay[k] = DSCDelayRequirement(cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable,
- mode_lib->mp.ODMMode[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- s->OutputBpp[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].num_dsc_slices,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- s->PixelClockBackEnd[k]);
- }
-
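- // Propagate the DSC delay computed for the OTG-master plane to every other plane on the same DSC-enabled stream.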
- for (k = 0; k < s->num_active_planes; ++k)
- for (j = 0; j < s->num_active_planes; ++j) // NumberOfSurfaces
- if (j != k && display_cfg->plane_descriptors[k].stream_index == j && cfg_support_info->stream_support_info[display_cfg->plane_descriptors[j].stream_index].dsc_enable)
- mode_lib->mp.DSCDelay[k] = mode_lib->mp.DSCDelay[j];
-
- // Prefetch
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0) {
- for (k = 0; k < s->num_active_planes; ++k)
- mode_lib->mp.SurfaceSizeInTheMALL[k] = 0;
- } else {
- CalculateSurfaceSizeInMall(
- display_cfg,
- s->num_active_planes,
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->mp.BytePerPixelY,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.Read256BlockWidthY,
- mode_lib->mp.Read256BlockWidthC,
- mode_lib->mp.Read256BlockHeightY,
- mode_lib->mp.Read256BlockHeightC,
- mode_lib->mp.MacroTileWidthY,
- mode_lib->mp.MacroTileWidthC,
- mode_lib->mp.MacroTileHeightY,
- mode_lib->mp.MacroTileHeightC,
-
- /* Output */
- mode_lib->mp.SurfaceSizeInTheMALL,
- &s->dummy_boolean[0]); /* bool *ExceededMALLSize */
- }
-
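- // Gather the per-surface parameters consumed by CalculateVMRowAndSwath below.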
- for (k = 0; k < s->num_active_planes; ++k) {
- s->SurfaceParameters[k].PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->SurfaceParameters[k].DPPPerSurface = mode_lib->mp.NoOfDPP[k];
- s->SurfaceParameters[k].RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- s->SurfaceParameters[k].ViewportHeight = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- s->SurfaceParameters[k].ViewportHeightC = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- s->SurfaceParameters[k].BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY[k];
- s->SurfaceParameters[k].BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY[k];
- s->SurfaceParameters[k].BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC[k];
- s->SurfaceParameters[k].BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC[k];
- s->SurfaceParameters[k].BlockWidthY = mode_lib->mp.MacroTileWidthY[k];
- s->SurfaceParameters[k].BlockHeightY = mode_lib->mp.MacroTileHeightY[k];
- s->SurfaceParameters[k].BlockWidthC = mode_lib->mp.MacroTileWidthC[k];
- s->SurfaceParameters[k].BlockHeightC = mode_lib->mp.MacroTileHeightC[k];
- s->SurfaceParameters[k].InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- s->SurfaceParameters[k].HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- s->SurfaceParameters[k].DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- s->SurfaceParameters[k].SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- s->SurfaceParameters[k].SurfaceTiling = display_cfg->plane_descriptors[k].surface.tiling;
- s->SurfaceParameters[k].BytePerPixelY = mode_lib->mp.BytePerPixelY[k];
- s->SurfaceParameters[k].BytePerPixelC = mode_lib->mp.BytePerPixelC[k];
- s->SurfaceParameters[k].ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- s->SurfaceParameters[k].VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- s->SurfaceParameters[k].VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- s->SurfaceParameters[k].VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- s->SurfaceParameters[k].VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- s->SurfaceParameters[k].PitchY = display_cfg->plane_descriptors[k].surface.plane0.pitch;
- s->SurfaceParameters[k].PitchC = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- s->SurfaceParameters[k].ViewportStationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- s->SurfaceParameters[k].ViewportXStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- s->SurfaceParameters[k].ViewportYStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- s->SurfaceParameters[k].ViewportXStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- s->SurfaceParameters[k].ViewportYStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- s->SurfaceParameters[k].FORCE_ONE_ROW_FOR_FRAME = display_cfg->plane_descriptors[k].overrides.hw.force_one_row_for_frame;
- s->SurfaceParameters[k].SwathHeightY = mode_lib->mp.SwathHeightY[k];
- s->SurfaceParameters[k].SwathHeightC = mode_lib->mp.SwathHeightC[k];
- s->SurfaceParameters[k].DCCMetaPitchY = display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch;
- s->SurfaceParameters[k].DCCMetaPitchC = display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch;
- }
-
- CalculateVMRowAndSwath_params->display_cfg = display_cfg;
- CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateVMRowAndSwath_params->myPipe = s->SurfaceParameters;
- CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->mp.SurfaceSizeInTheMALL;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
- CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
- CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->mp.SwathWidthC;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
- CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
- CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = mode_lib->mp.dpte_row_width_luma_ub;
- CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = mode_lib->mp.dpte_row_width_chroma_ub;
- CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->mp.dpte_row_height;
- CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->mp.dpte_row_height_chroma;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = mode_lib->mp.dpte_row_height_linear;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = mode_lib->mp.dpte_row_height_linear_chroma;
- CalculateVMRowAndSwath_params->vm_group_bytes = mode_lib->mp.vm_group_bytes;
- CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthY = mode_lib->mp.PixelPTEReqWidthY;
- CalculateVMRowAndSwath_params->PixelPTEReqHeightY = mode_lib->mp.PixelPTEReqHeightY;
- CalculateVMRowAndSwath_params->PTERequestSizeY = mode_lib->mp.PTERequestSizeY;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthC = mode_lib->mp.PixelPTEReqWidthC;
- CalculateVMRowAndSwath_params->PixelPTEReqHeightC = mode_lib->mp.PixelPTEReqHeightC;
- CalculateVMRowAndSwath_params->PTERequestSizeC = mode_lib->mp.PTERequestSizeC;
- CalculateVMRowAndSwath_params->vmpg_width_y = s->vmpg_width_y;
- CalculateVMRowAndSwath_params->vmpg_height_y = s->vmpg_height_y;
- CalculateVMRowAndSwath_params->vmpg_width_c = s->vmpg_width_c;
- CalculateVMRowAndSwath_params->vmpg_height_c = s->vmpg_height_c;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = mode_lib->mp.dpde0_bytes_per_frame_ub_l;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = mode_lib->mp.dpde0_bytes_per_frame_ub_c;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->mp.PrefetchSourceLinesY;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->mp.PrefetchSourceLinesC;
- CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->mp.VInitPreFillY;
- CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->mp.VInitPreFillC;
- CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->mp.MaxNumSwathY;
- CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->mp.MaxNumSwathC;
- CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
- CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->mp.PixelPTEBytesPerRow;
- CalculateVMRowAndSwath_params->vm_bytes = mode_lib->mp.vm_bytes;
- CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->mp.use_one_row_for_frame;
- CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->mp.use_one_row_for_frame_flip;
- CalculateVMRowAndSwath_params->is_using_mall_for_ss = mode_lib->mp.is_using_mall_for_ss;
- CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = mode_lib->mp.PTE_BUFFER_MODE;
- CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = mode_lib->mp.BIGK_FRAGMENT_SIZE;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = s->dummy_boolean_array[1];
- CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->mp.meta_row_bw;
- CalculateVMRowAndSwath_params->meta_row_bytes = mode_lib->mp.meta_row_bytes;
- CalculateVMRowAndSwath_params->meta_req_width_luma = mode_lib->mp.meta_req_width;
- CalculateVMRowAndSwath_params->meta_req_height_luma = mode_lib->mp.meta_req_height;
- CalculateVMRowAndSwath_params->meta_row_width_luma = mode_lib->mp.meta_row_width;
- CalculateVMRowAndSwath_params->meta_row_height_luma = mode_lib->mp.meta_row_height;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = mode_lib->mp.meta_pte_bytes_per_frame_ub_l;
- CalculateVMRowAndSwath_params->meta_req_width_chroma = mode_lib->mp.meta_req_width_chroma;
- CalculateVMRowAndSwath_params->meta_row_height_chroma = mode_lib->mp.meta_row_height_chroma;
- CalculateVMRowAndSwath_params->meta_row_width_chroma = mode_lib->mp.meta_row_width_chroma;
- CalculateVMRowAndSwath_params->meta_req_height_chroma = mode_lib->mp.meta_req_height_chroma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = mode_lib->mp.meta_pte_bytes_per_frame_ub_c;
-
- CalculateVMRowAndSwath(&mode_lib->scratch, CalculateVMRowAndSwath_params);
-
- memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
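- // With no MALL allocated for DCN (or when MRQ is present) the mcache/MALL DRAM overhead factors default to 1.0;
- // otherwise per-plane mcache settings and the MALL bandwidth overhead factors are calculated.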
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
- for (k = 0; k < s->num_active_planes; k++) {
- mode_lib->mp.mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mode_lib->mp.mall_prefetch_dram_overhead_factor[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1[k] = 1.0;
- }
- } else {
- for (k = 0; k < s->num_active_planes; k++) {
- calculate_mcache_setting_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- calculate_mcache_setting_params->num_chans = mode_lib->soc.clk_table.dram_config.channel_count;
- calculate_mcache_setting_params->mem_word_bytes = mode_lib->soc.mem_word_bytes;
- calculate_mcache_setting_params->mcache_size_bytes = mode_lib->soc.mcache_size_bytes;
- calculate_mcache_setting_params->mcache_line_size_bytes = mode_lib->soc.mcache_line_size_bytes;
- calculate_mcache_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_mcache_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- calculate_mcache_setting_params->source_format = display_cfg->plane_descriptors[k].pixel_format;
- calculate_mcache_setting_params->surf_vert = dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle);
- calculate_mcache_setting_params->vp_stationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- calculate_mcache_setting_params->tiling_mode = display_cfg->plane_descriptors[k].surface.tiling;
- calculate_mcache_setting_params->imall_enable = mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall;
-
- calculate_mcache_setting_params->vp_start_x_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- calculate_mcache_setting_params->vp_start_y_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- calculate_mcache_setting_params->full_vp_width_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- calculate_mcache_setting_params->full_vp_height_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- calculate_mcache_setting_params->blk_width_l = mode_lib->mp.MacroTileWidthY[k];
- calculate_mcache_setting_params->blk_height_l = mode_lib->mp.MacroTileHeightY[k];
- calculate_mcache_setting_params->vmpg_width_l = s->vmpg_width_y[k];
- calculate_mcache_setting_params->vmpg_height_l = s->vmpg_height_y[k];
- calculate_mcache_setting_params->full_swath_bytes_l = s->full_swath_bytes_l[k];
- calculate_mcache_setting_params->bytes_per_pixel_l = mode_lib->mp.BytePerPixelY[k];
-
- calculate_mcache_setting_params->vp_start_x_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- calculate_mcache_setting_params->vp_start_y_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- calculate_mcache_setting_params->full_vp_width_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- calculate_mcache_setting_params->full_vp_height_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- calculate_mcache_setting_params->blk_width_c = mode_lib->mp.MacroTileWidthC[k];
- calculate_mcache_setting_params->blk_height_c = mode_lib->mp.MacroTileHeightC[k];
- calculate_mcache_setting_params->vmpg_width_c = s->vmpg_width_c[k];
- calculate_mcache_setting_params->vmpg_height_c = s->vmpg_height_c[k];
- calculate_mcache_setting_params->full_swath_bytes_c = s->full_swath_bytes_c[k];
- calculate_mcache_setting_params->bytes_per_pixel_c = mode_lib->mp.BytePerPixelC[k];
-
- // output
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_l = &mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_l = &mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_c = &mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_c = &mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1[k];
-
- calculate_mcache_setting_params->num_mcaches_l = &mode_lib->mp.num_mcaches_l[k];
- calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->mp.mcache_row_bytes_l[k];
- calculate_mcache_setting_params->mcache_offsets_l = mode_lib->mp.mcache_offsets_l[k];
- calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->mp.mcache_shift_granularity_l[k];
-
- calculate_mcache_setting_params->num_mcaches_c = &mode_lib->mp.num_mcaches_c[k];
- calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->mp.mcache_row_bytes_c[k];
- calculate_mcache_setting_params->mcache_offsets_c = mode_lib->mp.mcache_offsets_c[k];
- calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->mp.mcache_shift_granularity_c[k];
-
- calculate_mcache_setting_params->mall_comb_mcache_l = &mode_lib->mp.mall_comb_mcache_l[k];
- calculate_mcache_setting_params->mall_comb_mcache_c = &mode_lib->mp.mall_comb_mcache_c[k];
- calculate_mcache_setting_params->lc_comb_mcache = &mode_lib->mp.lc_comb_mcache[k];
- calculate_mcache_setting(&mode_lib->scratch, calculate_mcache_setting_params);
- }
-
- calculate_mall_bw_overhead_factor(
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
-
- // input
- display_cfg,
- s->num_active_planes);
- }
-
- // Calculate all the available bandwidth
- calculate_bandwidth_available(
- mode_lib->mp.avg_bandwidth_available_min,
- mode_lib->mp.avg_bandwidth_available,
- mode_lib->mp.urg_bandwidth_available_min,
- mode_lib->mp.urg_bandwidth_available,
- mode_lib->mp.urg_bandwidth_available_vm_only,
- mode_lib->mp.urg_bandwidth_available_pixel_and_vm,
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->mp.Dcfclk,
- mode_lib->mp.FabricClock,
- mode_lib->mp.dram_bw_mbps);
-
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->mp.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- s->TotalDCCActiveDPP = 0;
- s->TotalActiveDPP = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- s->TotalActiveDPP = s->TotalActiveDPP + mode_lib->mp.NoOfDPP[k];
- if (display_cfg->plane_descriptors[k].surface.dcc.enable)
- s->TotalDCCActiveDPP = s->TotalDCCActiveDPP + mode_lib->mp.NoOfDPP[k];
- }
- // Calculate tdlut schedule related terms
- for (k = 0; k <= s->num_active_planes - 1; k++) {
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->mp.Dispclk;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
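- // Derive extra latency from ROB/reordering bytes, dpte/tdlut group sizes and hostvm inefficiency factors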
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->mp.Dcfclk,
- mode_lib->mp.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- mode_lib->mp.urg_bandwidth_available_min[dml2_core_internal_soc_state_sys_active],
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->mp.request_size_bytes_luma,
- mode_lib->mp.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->mp.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->mp.ExtraLatency,
- &mode_lib->mp.ExtraLatency_sr,
- &mode_lib->mp.ExtraLatencyPrefetch);
-
- mode_lib->mp.TCalc = 24.0 / mode_lib->mp.DCFCLKDeepSleep;
-
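- // Compute the writeback delay for each stream (worst case across its planes) and propagate it to all planes on that stream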
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackDelay[k] =
- mode_lib->soc.qos_parameters.writeback.base_latency_us
- + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk;
- } else
- mode_lib->mp.WritebackDelay[k] = 0;
-
- for (j = 0; j < s->num_active_planes; ++j) {
- if (display_cfg->plane_descriptors[j].stream_index == k
- && display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackDelay[k] =
- math_max2(
- mode_lib->mp.WritebackDelay[k],
- mode_lib->soc.qos_parameters.writeback.base_latency_us
- + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk);
- }
- }
- }
- }
-
- for (k = 0; k < s->num_active_planes; ++k)
- for (j = 0; j < s->num_active_planes; ++j)
- if (display_cfg->plane_descriptors[k].stream_index == j)
- mode_lib->mp.WritebackDelay[k] = mode_lib->mp.WritebackDelay[j];
-
- mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
- mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->mp.TripToMemory = CalculateTripToMemory(
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
-
- mode_lib->mp.MetaTripToMemory = CalculateMetaTripToMemory(
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
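- // Per-plane cursor request attributes and urgent burst factors for cursor, luma and chroma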
- for (k = 0; k < s->num_active_planes; ++k) {
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
-
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
-
- // output
- &mode_lib->mp.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
- mode_lib->mp.UrgentBurstFactorCursorPre[k] = mode_lib->mp.UrgentBurstFactorCursor[k];
-
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->mp.swath_width_luma_ub[k],
- mode_lib->mp.swath_width_chroma_ub[k],
- mode_lib->mp.SwathHeightY[k],
- mode_lib->mp.SwathHeightC[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- mode_lib->mp.DETBufferSizeY[k],
- mode_lib->mp.DETBufferSizeC[k],
-
- /* output */
- &mode_lib->mp.UrgentBurstFactorLuma[k],
- &mode_lib->mp.UrgentBurstFactorChroma[k],
- &mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
-
- mode_lib->mp.NotEnoughUrgentLatencyHiding[k] = mode_lib->mp.NotEnoughUrgentLatencyHiding[k] || cursor_not_enough_urgent_latency_hiding;
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- s->MaxVStartupLines[k] = CalculateMaxVStartup(
- mode_lib->ip.ptoi_supported,
- mode_lib->ip.vblank_nom_default_us,
- &display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing,
- mode_lib->mp.WritebackDelay[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
-#endif
- }
-
- s->immediate_flip_required = false;
- for (k = 0; k < s->num_active_planes; ++k) {
- s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
-#endif
-
- {
- s->DestinationLineTimesForPrefetchLessThan2 = false;
- s->VRatioPrefetchMoreThanMax = false;
-
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
-
- mode_lib->mp.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory);
-
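- // Fill the per-pipe state consumed by CalculatePrefetchSchedule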
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- myPipe->Dppclk = mode_lib->mp.Dppclk[k];
- myPipe->Dispclk = mode_lib->mp.Dispclk;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->mp.DCFCLKDeepSleep;
- myPipe->DPPPerSurface = mode_lib->mp.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->mp.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->mp.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->mp.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->mp.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->mp.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->mp.UrgentLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->mp.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->mp.TCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->mp.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->mp.PixelPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->mp.PrefetchSourceLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->mp.VInitPreFillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->mp.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->mp.PrefetchSourceLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->mp.VInitPreFillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->mp.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->mp.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->mp.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->mp.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->mp.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->mp.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->mp.TripToMemory;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->mp.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &mode_lib->mp.DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &mode_lib->mp.DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->mp.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->mp.dst_y_per_vm_vblank[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->mp.dst_y_per_row_vblank[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->mp.VRatioPrefetchY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->mp.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &mode_lib->mp.Tdmdl_vm[k];
- CalculatePrefetchSchedule_params->Tdmdl = &mode_lib->mp.Tdmdl[k];
- CalculatePrefetchSchedule_params->TSetup = &mode_lib->mp.TSetup[k];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &mode_lib->mp.VUpdateOffsetPix[k];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &mode_lib->mp.VUpdateWidthPix[k];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &mode_lib->mp.VReadyOffsetPix[k];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->mp.prefetch_cursor_bw[k];
-
- mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
-#endif
- mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
- } // for k
-
- mode_lib->mp.PrefetchModeSupported = true;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
- mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
- mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- if (mode_lib->mp.dst_y_prefetch[k] < 2)
- s->DestinationLineTimesForPrefetchLessThan2 = true;
-
- if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__)
- s->VRatioPrefetchMoreThanMax = true;
-
- if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- }
-
- if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
- mode_lib->mp.PrefetchModeSupported = false;
- }
-
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
- mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
-
- // Prefetch schedule OK, now check prefetch bw
- if (mode_lib->mp.PrefetchModeSupported == true) {
- for (k = 0; k < s->num_active_planes; ++k) {
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->mp.swath_width_luma_ub[k],
- mode_lib->mp.swath_width_chroma_ub[k],
- mode_lib->mp.SwathHeightY[k],
- mode_lib->mp.SwathHeightC[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.VRatioPrefetchY[k],
- mode_lib->mp.VRatioPrefetchC[k],
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- mode_lib->mp.DETBufferSizeY[k],
- mode_lib->mp.DETBufferSizeC[k],
- /* Output */
- &mode_lib->mp.UrgentBurstFactorLumaPre[k],
- &mode_lib->mp.UrgentBurstFactorChromaPre[k],
- &mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
-
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
-
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceLuma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceChroma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
-#endif
- }
-
- for (k = 0; k <= s->num_active_planes - 1; k++)
- mode_lib->mp.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- mode_lib->mp.urg_vactive_bandwidth_required,
- mode_lib->mp.urg_bandwidth_required,
- mode_lib->mp.non_urg_bandwidth_required,
-
- // Input
- display_cfg,
- 0, // inc_flip_bw
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
- mode_lib->mp.SurfaceReadBandwidthLuma,
- mode_lib->mp.SurfaceReadBandwidthChroma,
- mode_lib->mp.RequiredPrefetchPixelDataBWLuma,
- mode_lib->mp.RequiredPrefetchPixelDataBWChroma,
- mode_lib->mp.cursor_bw,
- mode_lib->mp.dpte_row_bw,
- mode_lib->mp.meta_row_bw,
- mode_lib->mp.prefetch_cursor_bw,
- mode_lib->mp.prefetch_vmrow_bw,
- mode_lib->mp.final_flip_bw,
- mode_lib->mp.UrgentBurstFactorLuma,
- mode_lib->mp.UrgentBurstFactorChroma,
- mode_lib->mp.UrgentBurstFactorCursor,
- mode_lib->mp.UrgentBurstFactorLumaPre,
- mode_lib->mp.UrgentBurstFactorChromaPre,
- mode_lib->mp.UrgentBurstFactorCursorPre);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &mode_lib->mp.FractionOfUrgentBandwidth, // double* frac_urg_bandwidth
- &mode_lib->mp.FractionOfUrgentBandwidthMALL, // double* frac_urg_bandwidth_mall
- &s->dummy_boolean[1], // vactive bw ok
- &mode_lib->mp.PrefetchModeSupported, // prefetch bw ok
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->mp.non_urg_bandwidth_required,
- mode_lib->mp.urg_vactive_bandwidth_required,
- mode_lib->mp.urg_bandwidth_required,
- mode_lib->mp.urg_bandwidth_available);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- }
- } // prefetch schedule ok
-
- // Prefetch schedule and prefetch bw ok, now check flip bw
- if (mode_lib->mp.PrefetchModeSupported == true) {
-
- mode_lib->mp.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(dml2_core_internal_soc_state_sys_active,
- mode_lib->mp.urg_bandwidth_required, // no flip
- mode_lib->mp.urg_bandwidth_available);
- mode_lib->mp.TotImmediateFlipBytes = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->mp.vm_bytes[k],
- mode_lib->mp.PixelPTEBytesPerRow[k],
- mode_lib->mp.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
-#endif
- }
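- // Evaluate the flip schedule for each plane using the bandwidth available for immediate flip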
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 0, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->mp.vm_bytes[k],
- mode_lib->mp.PixelPTEBytesPerRow[k],
- mode_lib->mp.BandwidthAvailableForImmediateFlip,
- mode_lib->mp.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->mp.Tno_bw[k],
- mode_lib->mp.dpte_row_height[k],
- mode_lib->mp.dpte_row_height_chroma[k],
- mode_lib->mp.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- s->per_pipe_flip_bytes[k],
- mode_lib->mp.meta_row_bytes[k],
- mode_lib->mp.meta_row_height[k],
- mode_lib->mp.meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- // Output
- &mode_lib->mp.dst_y_per_vm_flip[k],
- &mode_lib->mp.dst_y_per_row_flip[k],
- &mode_lib->mp.final_flip_bw[k],
- &mode_lib->mp.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- s->dummy_bw,
- mode_lib->mp.urg_bandwidth_required_flip,
- mode_lib->mp.non_urg_bandwidth_required_flip,
-
- // Input
- display_cfg,
- 1, // inc_flip_bw
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
- mode_lib->mp.SurfaceReadBandwidthLuma,
- mode_lib->mp.SurfaceReadBandwidthChroma,
- mode_lib->mp.RequiredPrefetchPixelDataBWLuma,
- mode_lib->mp.RequiredPrefetchPixelDataBWChroma,
- mode_lib->mp.cursor_bw,
- mode_lib->mp.dpte_row_bw,
- mode_lib->mp.meta_row_bw,
- mode_lib->mp.prefetch_cursor_bw,
- mode_lib->mp.prefetch_vmrow_bw,
- mode_lib->mp.final_flip_bw,
- mode_lib->mp.UrgentBurstFactorLuma,
- mode_lib->mp.UrgentBurstFactorChroma,
- mode_lib->mp.UrgentBurstFactorCursor,
- mode_lib->mp.UrgentBurstFactorLumaPre,
- mode_lib->mp.UrgentBurstFactorChromaPre,
- mode_lib->mp.UrgentBurstFactorCursorPre);
-
- calculate_immediate_flip_bandwidth_support(
- &mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip, // double* frac_urg_bandwidth_flip
- &mode_lib->mp.ImmediateFlipSupported, // bool* flip_bandwidth_support_ok
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->mp.urg_bandwidth_required_flip,
- mode_lib->mp.non_urg_bandwidth_required_flip,
- mode_lib->mp.urg_bandwidth_available);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
- mode_lib->mp.ImmediateFlipSupported = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
-#endif
- }
- }
- } else { // flip or prefetch not supported
- mode_lib->mp.ImmediateFlipSupported = false;
- }
-
- // consider flip support okay if the flip bw is ok or when the user doesn't require an iflip and there is no host vm
- bool must_support_iflip = display_cfg->hostvm_enable || s->immediate_flip_required;
- mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
- for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
-#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
- }
-
- for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
-
- if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
- } else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
-
- // DCC Configuration
- for (k = 0; k < s->num_active_planes; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
-#endif
- CalculateDCCConfiguration(
- display_cfg->plane_descriptors[k].surface.dcc.enable,
- display_cfg->overrides.dcc_programming_assumes_scan_direction_unknown,
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.plane0.width,
- display_cfg->plane_descriptors[k].surface.plane1.width,
- display_cfg->plane_descriptors[k].surface.plane0.height,
- display_cfg->plane_descriptors[k].surface.plane1.height,
- s->NomDETInKByte,
- mode_lib->mp.Read256BlockHeightY[k],
- mode_lib->mp.Read256BlockHeightC[k],
- display_cfg->plane_descriptors[k].surface.tiling,
- mode_lib->mp.BytePerPixelY[k],
- mode_lib->mp.BytePerPixelC[k],
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- display_cfg->plane_descriptors[k].composition.rotation_angle,
-
- /* Output */
- &mode_lib->mp.RequestLuma[k],
- &mode_lib->mp.RequestChroma[k],
- &mode_lib->mp.DCCYMaxUncompressedBlock[k],
- &mode_lib->mp.DCCCMaxUncompressedBlock[k],
- &mode_lib->mp.DCCYMaxCompressedBlock[k],
- &mode_lib->mp.DCCCMaxCompressedBlock[k],
- &mode_lib->mp.DCCYIndependentBlock[k],
- &mode_lib->mp.DCCCIndependentBlock[k]);
- }
-
- //Watermarks and NB P-State/DRAM Clock Change Support
- s->mmSOCParameters.UrgentLatency = mode_lib->mp.UrgentLatency;
- s->mmSOCParameters.ExtraLatency = mode_lib->mp.ExtraLatency;
- s->mmSOCParameters.ExtraLatency_sr = mode_lib->mp.ExtraLatency_sr;
- s->mmSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mmSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mmSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mmSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mmSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mmSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mmSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mmSOCParameters.USRRetrainingLatency = 0; //0; //FIXME_STAGE2
- s->mmSOCParameters.SMNLatency = 0; //mode_lib->soc.smn_latency_us; //FIXME_STAGE2
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false/*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement*/;
- CalculateWatermarks_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->mp.Dcfclk;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mmSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = s->SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->mp.DCFCLKDeepSleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; //FIXME_STAGE2
- CalculateWatermarks_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->mp.SwathWidthC;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->mp.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = mode_lib->mp.DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = mode_lib->mp.DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = mode_lib->mp.meta_row_height;
- CalculateWatermarks_params->meta_row_height_c = mode_lib->mp.meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->mp.Watermark;
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->mp.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->mp.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported;
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->mp.SubViewportLinesNeededInMALL;
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->mp.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->mp.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->mp.USRRetrainingSupport;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = 0;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
- mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackFCLKChangeWatermark);
- } else {
- mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = 0;
- mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = 0;
- }
- }
-
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
-
- //Display Pipeline Delivery Time in Prefetch, Groups
- CalculatePixelDeliveryTimes(
- display_cfg,
- cfg_support_info,
- s->num_active_planes,
- mode_lib->mp.VRatioPrefetchY,
- mode_lib->mp.VRatioPrefetchC,
- mode_lib->mp.swath_width_luma_ub,
- mode_lib->mp.swath_width_chroma_ub,
- mode_lib->mp.PSCL_THROUGHPUT,
- mode_lib->mp.PSCL_THROUGHPUT_CHROMA,
- mode_lib->mp.Dppclk,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.req_per_swath_ub_l,
- mode_lib->mp.req_per_swath_ub_c,
-
- /* Output */
- mode_lib->mp.DisplayPipeLineDeliveryTimeLuma,
- mode_lib->mp.DisplayPipeLineDeliveryTimeChroma,
- mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch,
- mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch);
-
- CalculateMetaAndPTETimes_params->scratch = &mode_lib->scratch;
- CalculateMetaAndPTETimes_params->display_cfg = display_cfg;
- CalculateMetaAndPTETimes_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateMetaAndPTETimes_params->use_one_row_for_frame = mode_lib->mp.use_one_row_for_frame;
- CalculateMetaAndPTETimes_params->dst_y_per_row_vblank = mode_lib->mp.dst_y_per_row_vblank;
- CalculateMetaAndPTETimes_params->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip;
- CalculateMetaAndPTETimes_params->BytePerPixelY = mode_lib->mp.BytePerPixelY;
- CalculateMetaAndPTETimes_params->BytePerPixelC = mode_lib->mp.BytePerPixelC;
- CalculateMetaAndPTETimes_params->dpte_row_height = mode_lib->mp.dpte_row_height;
- CalculateMetaAndPTETimes_params->dpte_row_height_chroma = mode_lib->mp.dpte_row_height_chroma;
- CalculateMetaAndPTETimes_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateMetaAndPTETimes_params->PTERequestSizeY = mode_lib->mp.PTERequestSizeY;
- CalculateMetaAndPTETimes_params->PTERequestSizeC = mode_lib->mp.PTERequestSizeC;
- CalculateMetaAndPTETimes_params->PixelPTEReqWidthY = mode_lib->mp.PixelPTEReqWidthY;
- CalculateMetaAndPTETimes_params->PixelPTEReqHeightY = mode_lib->mp.PixelPTEReqHeightY;
- CalculateMetaAndPTETimes_params->PixelPTEReqWidthC = mode_lib->mp.PixelPTEReqWidthC;
- CalculateMetaAndPTETimes_params->PixelPTEReqHeightC = mode_lib->mp.PixelPTEReqHeightC;
- CalculateMetaAndPTETimes_params->dpte_row_width_luma_ub = mode_lib->mp.dpte_row_width_luma_ub;
- CalculateMetaAndPTETimes_params->dpte_row_width_chroma_ub = mode_lib->mp.dpte_row_width_chroma_ub;
- CalculateMetaAndPTETimes_params->tdlut_groups_per_2row_ub = s->tdlut_groups_per_2row_ub;
- CalculateMetaAndPTETimes_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- CalculateMetaAndPTETimes_params->MetaChunkSize = mode_lib->ip.meta_chunk_size_kbytes;
- CalculateMetaAndPTETimes_params->MinMetaChunkSizeBytes = mode_lib->ip.min_meta_chunk_size_bytes;
- CalculateMetaAndPTETimes_params->meta_row_width = mode_lib->mp.meta_row_width;
- CalculateMetaAndPTETimes_params->meta_row_width_chroma = mode_lib->mp.meta_row_width_chroma;
- CalculateMetaAndPTETimes_params->meta_row_height = mode_lib->mp.meta_row_height;
- CalculateMetaAndPTETimes_params->meta_row_height_chroma = mode_lib->mp.meta_row_height_chroma;
- CalculateMetaAndPTETimes_params->meta_req_width = mode_lib->mp.meta_req_width;
- CalculateMetaAndPTETimes_params->meta_req_width_chroma = mode_lib->mp.meta_req_width_chroma;
- CalculateMetaAndPTETimes_params->meta_req_height = mode_lib->mp.meta_req_height;
- CalculateMetaAndPTETimes_params->meta_req_height_chroma = mode_lib->mp.meta_req_height_chroma;
-
- CalculateMetaAndPTETimes_params->time_per_tdlut_group = mode_lib->mp.time_per_tdlut_group;
- CalculateMetaAndPTETimes_params->DST_Y_PER_PTE_ROW_NOM_L = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L;
- CalculateMetaAndPTETimes_params->DST_Y_PER_PTE_ROW_NOM_C = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C;
- CalculateMetaAndPTETimes_params->time_per_pte_group_nom_luma = mode_lib->mp.time_per_pte_group_nom_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_vblank_luma = mode_lib->mp.time_per_pte_group_vblank_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_flip_luma = mode_lib->mp.time_per_pte_group_flip_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_nom_chroma = mode_lib->mp.time_per_pte_group_nom_chroma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_vblank_chroma = mode_lib->mp.time_per_pte_group_vblank_chroma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_flip_chroma = mode_lib->mp.time_per_pte_group_flip_chroma;
- CalculateMetaAndPTETimes_params->DST_Y_PER_META_ROW_NOM_L = mode_lib->mp.DST_Y_PER_META_ROW_NOM_L;
- CalculateMetaAndPTETimes_params->DST_Y_PER_META_ROW_NOM_C = mode_lib->mp.DST_Y_PER_META_ROW_NOM_C;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkNominal = mode_lib->mp.TimePerMetaChunkNominal;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkNominal = mode_lib->mp.TimePerChromaMetaChunkNominal;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkVBlank = mode_lib->mp.TimePerMetaChunkVBlank;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkVBlank = mode_lib->mp.TimePerChromaMetaChunkVBlank;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkFlip = mode_lib->mp.TimePerMetaChunkFlip;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkFlip = mode_lib->mp.TimePerChromaMetaChunkFlip;
-
- CalculateMetaAndPTETimes(CalculateMetaAndPTETimes_params);
-
- CalculateVMGroupAndRequestTimes(
- display_cfg,
- s->num_active_planes,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.dst_y_per_vm_vblank,
- mode_lib->mp.dst_y_per_vm_flip,
- mode_lib->mp.dpte_row_width_luma_ub,
- mode_lib->mp.dpte_row_width_chroma_ub,
- mode_lib->mp.vm_group_bytes,
- mode_lib->mp.dpde0_bytes_per_frame_ub_l,
- mode_lib->mp.dpde0_bytes_per_frame_ub_c,
- s->tdlut_pte_bytes_per_frame,
- mode_lib->mp.meta_pte_bytes_per_frame_ub_l,
- mode_lib->mp.meta_pte_bytes_per_frame_ub_c,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- mode_lib->mp.TimePerVMGroupVBlank,
- mode_lib->mp.TimePerVMGroupFlip,
- mode_lib->mp.TimePerVMRequestVBlank,
- mode_lib->mp.TimePerVMRequestFlip);
-
- // VStartup Adjustment
- for (k = 0; k < s->num_active_planes; ++k) {
-
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TWait[k] + mode_lib->mp.ExtraLatency;
- if (!display_cfg->plane_descriptors[k].dynamic_meta_data.enable)
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
-#endif
- s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
-#endif
-
- mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
- if (display_cfg->plane_descriptors[k].dynamic_meta_data.enable && mode_lib->ip.dynamic_metadata_vm_enabled) {
- mode_lib->mp.Tdmdl_vm[k] = mode_lib->mp.Tdmdl_vm[k] + s->Tvstartup_margin;
- }
-
- bool isInterlaceTiming = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !mode_lib->ip.ptoi_supported);
-
- // The actual positioning of the vstartup
- mode_lib->mp.VStartup[k] = (isInterlaceTiming ? (2 * s->MaxVStartupLines[k]) : s->MaxVStartupLines[k]);
-
- s->dlg_vblank_start = ((isInterlaceTiming ? math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch) / 2.0, 1.0) :
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total) - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- s->LSetup = math_floor2(4.0 * mode_lib->mp.TSetup[k] / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)), 1.0) / 4.0;
- s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
-
- if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
- s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
- }
- mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
-
- // debug only
- if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + (double) mode_lib->mp.VReadyOffsetPix[k]) / display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
- (isInterlaceTiming ?
- math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]) / 2.0, 1.0) :
- (int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]))) {
- mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = true;
- } else {
- mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
-#endif
- }
-
- //Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
- s->WRBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_32) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8;
- }
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
- }
-
- mode_lib->mp.TotalDataReadBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.SurfaceReadBandwidthLuma[k] + mode_lib->mp.SurfaceReadBandwidthChroma[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
-#endif
- }
-
- CalculateStutterEfficiency_params->display_cfg = display_cfg;
- CalculateStutterEfficiency_params->CompressedBufferSizeInkByte = mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateStutterEfficiency_params->UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
- CalculateStutterEfficiency_params->MetaFIFOSizeInKEntries = mode_lib->ip.meta_fifo_size_in_kentries;
- CalculateStutterEfficiency_params->ZeroSizeBufferEntries = mode_lib->ip.zero_size_buffer_entries;
- CalculateStutterEfficiency_params->PixelChunkSizeInKByte = mode_lib->ip.pixel_chunk_size_kbytes;
- CalculateStutterEfficiency_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateStutterEfficiency_params->ROBBufferSizeInKByte = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateStutterEfficiency_params->TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth;
- CalculateStutterEfficiency_params->DCFCLK = mode_lib->mp.Dcfclk;
- CalculateStutterEfficiency_params->ReturnBW = mode_lib->mp.urg_bandwidth_available_min[dml2_core_internal_soc_state_sys_active];
- CalculateStutterEfficiency_params->CompbufReservedSpace64B = mode_lib->mp.compbuf_reserved_space_64b;
- CalculateStutterEfficiency_params->CompbufReservedSpaceZs = mode_lib->ip.compbuf_reserved_space_zs;
- CalculateStutterEfficiency_params->SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- CalculateStutterEfficiency_params->SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- CalculateStutterEfficiency_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateStutterEfficiency_params->StutterEnterPlusExitWatermark = mode_lib->mp.Watermark.StutterEnterPlusExitWatermark;
- CalculateStutterEfficiency_params->Z8StutterEnterPlusExitWatermark = mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark;
- CalculateStutterEfficiency_params->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- CalculateStutterEfficiency_params->MinTTUVBlank = mode_lib->mp.MinTTUVBlank;
- CalculateStutterEfficiency_params->DPPPerSurface = mode_lib->mp.NoOfDPP;
- CalculateStutterEfficiency_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateStutterEfficiency_params->BytePerPixelY = mode_lib->mp.BytePerPixelY;
- CalculateStutterEfficiency_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateStutterEfficiency_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateStutterEfficiency_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateStutterEfficiency_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- CalculateStutterEfficiency_params->BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY;
- CalculateStutterEfficiency_params->BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY;
- CalculateStutterEfficiency_params->BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC;
- CalculateStutterEfficiency_params->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC;
- CalculateStutterEfficiency_params->DCCYMaxUncompressedBlock = mode_lib->mp.DCCYMaxUncompressedBlock;
- CalculateStutterEfficiency_params->DCCCMaxUncompressedBlock = mode_lib->mp.DCCCMaxUncompressedBlock;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
- CalculateStutterEfficiency_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
- CalculateStutterEfficiency_params->meta_row_bw = mode_lib->mp.meta_row_bw;
- CalculateStutterEfficiency_params->rob_alloc_compressed = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateStutterEfficiency_params->StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.StutterEfficiencyNotIncludingVBlank;
- CalculateStutterEfficiency_params->StutterEfficiency = &mode_lib->mp.StutterEfficiency;
- CalculateStutterEfficiency_params->NumberOfStutterBurstsPerFrame = &mode_lib->mp.NumberOfStutterBurstsPerFrame;
- CalculateStutterEfficiency_params->Z8StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlank;
- CalculateStutterEfficiency_params->Z8StutterEfficiency = &mode_lib->mp.Z8StutterEfficiency;
- CalculateStutterEfficiency_params->Z8NumberOfStutterBurstsPerFrame = &mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- CalculateStutterEfficiency_params->StutterPeriod = &mode_lib->mp.StutterPeriod;
- CalculateStutterEfficiency_params->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = &mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
-
- // Stutter Efficiency
- CalculateStutterEfficiency(&mode_lib->scratch, CalculateStutterEfficiency_params);
-
-#ifdef __DML_VBA_ALLOW_DELTA__
- // Calculate z8 stutter eff assuming 0 reserved space
- CalculateStutterEfficiency_params->CompbufReservedSpace64B = 0;
- CalculateStutterEfficiency_params->CompbufReservedSpaceZs = 0;
-
- CalculateStutterEfficiency_params->Z8StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlankBestCase;
- CalculateStutterEfficiency_params->Z8StutterEfficiency = &mode_lib->mp.Z8StutterEfficiencyBestCase;
- CalculateStutterEfficiency_params->Z8NumberOfStutterBurstsPerFrame = &mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase;
- CalculateStutterEfficiency_params->StutterPeriod = &mode_lib->mp.StutterPeriodBestCase;
-
- // Stutter Efficiency
- CalculateStutterEfficiency(&mode_lib->scratch, CalculateStutterEfficiency_params);
-#else
- mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlankBestCase = mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlank;
- mode_lib->mp.Z8StutterEfficiencyBestCase = mode_lib->mp.Z8StutterEfficiency;
- mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase = mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- mode_lib->mp.StutterPeriodBestCase = mode_lib->mp.StutterPeriod;
-#endif
- } // PrefetchAndImmediateFlipSupported
-
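- // Worst-case return latency expressed in DCFCLK cycles: the fixed UCLK and FCLK cycle counts
- // below (83 and 75) are converted to time at the maximum UCLK/FCLK rates, then multiplied by the
- // hard minimum DCFCLK. E.g. with max_uclk = 1000 MHz, max_fclk = 2000 MHz and min_dcfclk = 400 MHz:
- // (83/1000 + 75/2000) us * 400 MHz = 48.2 cycles.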
- const long min_return_uclk_cycles = 83;
- const long min_return_fclk_cycles = 75;
- double max_fclk_mhz = min_clk_table->max_clocks_khz.fclk / 1000.0;
- double max_uclk_mhz = mode_lib->soc.clk_table.uclk.clk_values_khz[mode_lib->soc.clk_table.uclk.num_clk_values - 1] / 1000.0;
- double hard_minimum_dcfclk_mhz = (double)min_clk_table->dram_bw_table.entries[0].min_dcfclk_khz / 1000.0;
- double min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
- mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
- mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
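- // The computed hysteresis above is superseded by the hard-coded assignment below; 255 is the
- // largest value the field can hold (the assert bounds it to < 256).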
- mode_lib->mp.dcfclk_deep_sleep_hysteresis = 255;
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
-#endif
-
- return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
-}
-
-static bool dml_is_dual_plane(enum dml2_source_format_class source_format)
-{
- bool ret_val = false;
-
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = true;
-
- return ret_val;
-}
-
-static unsigned int dml_get_plane_idx(const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx)
-{
- unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
- return plane_idx;
-}
-
-static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *wm_regs)
-{
- double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
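- // Watermarks are computed in microseconds; multiplying by the DLG reference clock in MHz
- // converts them to refclk cycles for the register fields.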
- wm_regs->fclk_pstate = (unsigned int)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
- wm_regs->sr_enter = (unsigned int)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
- wm_regs->sr_exit = (unsigned int)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = 0;
- wm_regs->uclk_pstate = (unsigned int)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
- wm_regs->urgent = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
-}
-
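- // Helper for register fields that store sizes as log2(value) minus a fixed offset, e.g. a chunk
- // size register programmed via log_and_substract_if_non_zero(8 * 1024, 10) encodes 8 KiB as
- // log2(8192) - 10 = 3.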
-static unsigned int log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend)
-{
- if (a == 0)
- return 0;
-
- return (math_log2_approx(a) - subtrahend);
-}
-
-void dml2_core_shared_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p)
-{
- int dst_x_offset = (int)((p->cursor_x_position + (p->cursor_stereo_en == 0 ? 0 : math_max2(p->cursor_primary_offset, p->cursor_secondary_offset)) -
- (p->cursor_hotspot_x * (p->cursor_2x_magnify == 0 ? 1 : 2))) * p->dlg_refclk_mhz / p->pixel_rate_mhz / p->hratio);
- cursor_dlg_regs->dst_x_offset = (unsigned int)((dst_x_offset > 0) ? dst_x_offset : 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
-#endif
-
- cursor_dlg_regs->chunk_hdl_adjust = 3;
- cursor_dlg_regs->dst_y_offset = 0;
-
- cursor_dlg_regs->qos_level_fixed = 8;
- cursor_dlg_regs->qos_ramp_disable = 0;
-}
-
-static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- unsigned int pipe_idx)
-{
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
-
- unsigned int plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- enum dml2_source_format_class source_format = display_cfg->plane_descriptors[plane_idx].pixel_format;
- enum dml2_swizzle_mode sw_mode = display_cfg->plane_descriptors[plane_idx].surface.tiling;
- bool dual_plane = dml_is_dual_plane((enum dml2_source_format_class)(source_format));
-
- unsigned int pixel_chunk_bytes = 0;
- unsigned int min_pixel_chunk_bytes = 0;
- unsigned int dpte_group_bytes = 0;
- unsigned int mpte_group_bytes = 0;
-
- unsigned int p1_pixel_chunk_bytes = 0;
- unsigned int p1_min_pixel_chunk_bytes = 0;
- unsigned int p1_dpte_group_bytes = 0;
- unsigned int p1_mpte_group_bytes = 0;
-
- pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
- min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
-
- if (pixel_chunk_bytes == 64 * 1024)
- min_pixel_chunk_bytes = 0;
-
- dpte_group_bytes = (unsigned int)(mode_lib->mp.dpte_group_bytes[mode_lib->mp.pipe_plane[pipe_idx]]);
- mpte_group_bytes = (unsigned int)(mode_lib->mp.vm_group_bytes[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- p1_pixel_chunk_bytes = pixel_chunk_bytes;
- p1_min_pixel_chunk_bytes = min_pixel_chunk_bytes;
- p1_dpte_group_bytes = dpte_group_bytes;
- p1_mpte_group_bytes = mpte_group_bytes;
-
- if (source_format == dml2_rgbe_alpha)
- p1_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.alpha_pixel_chunk_size_kbytes * 1024);
-
- rq_regs->unbounded_request_enabled = mode_lib->mp.UnboundedRequestEnabled;
- rq_regs->rq_regs_l.chunk_size = log_and_substract_if_non_zero(pixel_chunk_bytes, 10);
- rq_regs->rq_regs_c.chunk_size = log_and_substract_if_non_zero(p1_pixel_chunk_bytes, 10);
-
- if (min_pixel_chunk_bytes == 0)
- rq_regs->rq_regs_l.min_chunk_size = 0;
- else
- rq_regs->rq_regs_l.min_chunk_size = log_and_substract_if_non_zero(min_pixel_chunk_bytes, 8 - 1);
-
- if (p1_min_pixel_chunk_bytes == 0)
- rq_regs->rq_regs_c.min_chunk_size = 0;
- else
- rq_regs->rq_regs_c.min_chunk_size = log_and_substract_if_non_zero(p1_min_pixel_chunk_bytes, 8 - 1);
-
- rq_regs->rq_regs_l.dpte_group_size = log_and_substract_if_non_zero(dpte_group_bytes, 6);
- rq_regs->rq_regs_l.mpte_group_size = log_and_substract_if_non_zero(mpte_group_bytes, 6);
- rq_regs->rq_regs_c.dpte_group_size = log_and_substract_if_non_zero(p1_dpte_group_bytes, 6);
- rq_regs->rq_regs_c.mpte_group_size = log_and_substract_if_non_zero(p1_mpte_group_bytes, 6);
-
- unsigned int detile_buf_size_in_bytes = (unsigned int)(mode_lib->mp.DETBufferSizeInKByte[mode_lib->mp.pipe_plane[pipe_idx]] * 1024);
- unsigned int detile_buf_plane1_addr = 0;
-
- if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
- unsigned int p0_pte_row_height_linear = (unsigned int)(mode_lib->mp.dpte_row_height_linear[mode_lib->mp.pipe_plane[pipe_idx]]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
-#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
-
- rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
- if (dual_plane) {
- unsigned int p1_pte_row_height_linear = (unsigned int)(mode_lib->mp.dpte_row_height_linear_chroma[mode_lib->mp.pipe_plane[pipe_idx]]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
-#endif
- if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
- }
-
- rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
- }
- } else {
- rq_regs->rq_regs_l.pte_row_height_linear = 0;
- rq_regs->rq_regs_c.pte_row_height_linear = 0;
- }
-
- rq_regs->rq_regs_l.swath_height = log_and_substract_if_non_zero(mode_lib->mp.SwathHeightY[mode_lib->mp.pipe_plane[pipe_idx]], 0);
- rq_regs->rq_regs_c.swath_height = log_and_substract_if_non_zero(mode_lib->mp.SwathHeightC[mode_lib->mp.pipe_plane[pipe_idx]], 0);
-
- // FIXME_DCN4, programming guide has dGPU condition
- if (pixel_chunk_bytes >= 32 * 1024 || (dual_plane && p1_pixel_chunk_bytes >= 32 * 1024)) { //32kb
- rq_regs->drq_expansion_mode = 0;
- } else {
- rq_regs->drq_expansion_mode = 2;
- }
- rq_regs->prq_expansion_mode = 1;
- rq_regs->crq_expansion_mode = 1;
- rq_regs->mrq_expansion_mode = 1;
-
- double stored_swath_l_bytes = mode_lib->mp.DETBufferSizeY[mode_lib->mp.pipe_plane[pipe_idx]];
- double stored_swath_c_bytes = mode_lib->mp.DETBufferSizeC[mode_lib->mp.pipe_plane[pipe_idx]];
- bool is_phantom_pipe = dml_get_is_phantom_pipe(display_cfg, mode_lib, pipe_idx);
-
- // Note: detile_buf_plane1_addr is in unit of 1KB
- if (dual_plane) {
- if (is_phantom_pipe) {
- detile_buf_plane1_addr = (unsigned int)((1024.0 * 1024.0) / 2.0 / 1024.0); // half to chroma
- } else {
- if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
- detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
-#endif
- } else {
- detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
-#endif
- }
- }
- }
- rq_regs->plane1_base_address = detile_buf_plane1_addr;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
-#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
-}
-
-static void rq_dlg_get_dlg_reg(
- struct dml2_core_internal_scratch *s,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- const unsigned int pipe_idx)
-{
- struct dml2_core_shared_rq_dlg_get_dlg_reg_locals *l = &s->rq_dlg_get_dlg_reg_locals;
-
- memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
-
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
-
- l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
-
- l->source_format = dml2_444_8;
- l->dual_plane = dml_is_dual_plane(l->source_format);
- l->odm_mode = dml2_odm_mode_bypass;
-
- l->htotal = 0;
- l->hactive = 0;
- l->hblank_end = 0;
- l->vblank_end = 0;
- l->interlaced = false;
- l->pclk_freq_in_mhz = 0.0;
- l->refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
- l->ref_freq_to_pix_freq = 0.0;
-
- if (l->plane_idx < DML2_MAX_PLANES) {
-
- l->timing = &display_cfg->stream_descriptors[display_cfg->plane_descriptors[l->plane_idx].stream_index].timing;
- l->source_format = display_cfg->plane_descriptors[l->plane_idx].pixel_format;
- l->odm_mode = mode_lib->mp.ODMMode[l->plane_idx];
-
- l->htotal = l->timing->h_total;
- l->hactive = l->timing->h_active;
- l->hblank_end = l->timing->h_blank_end;
- l->vblank_end = l->timing->v_blank_end;
- l->interlaced = l->timing->interlaced;
- l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
- l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
-
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
-
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
-
- // Need to figure out which side of odm combine we're in
- // Assume the pipe instance under the same plane is in order
-
- if (l->odm_mode == dml2_odm_mode_bypass) {
- disp_dlg_regs->refcyc_h_blank_end = (unsigned int)((double)l->hblank_end * l->ref_freq_to_pix_freq);
- } else if (l->odm_mode == dml2_odm_mode_combine_2to1 || l->odm_mode == dml2_odm_mode_combine_3to1 || l->odm_mode == dml2_odm_mode_combine_4to1) {
- // find out how many pipe are in this plane
- l->num_active_pipes = mode_lib->mp.num_active_pipes;
- l->first_pipe_idx_in_plane = DML2_MAX_PLANES;
- l->pipe_idx_in_combine = 0; // pipe index within the plane
- l->odm_combine_factor = 2;
-
- if (l->odm_mode == dml2_odm_mode_combine_3to1)
- l->odm_combine_factor = 3;
- else if (l->odm_mode == dml2_odm_mode_combine_4to1)
- l->odm_combine_factor = 4;
-
- for (unsigned int i = 0; i < l->num_active_pipes; i++) {
- if (dml_get_plane_idx(mode_lib, i) == l->plane_idx) {
- if (i < l->first_pipe_idx_in_plane) {
- l->first_pipe_idx_in_plane = i;
- }
- }
- }
- l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.)
-
- disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
- }
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
-
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
-
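- // Fixed-point register encodings: ref_freq_to_pix_freq keeps 19 fractional bits (value * 2^19)
- // and refcyc_per_htotal keeps 8, so e.g. a refclk/pixclk ratio of 1.5 is programmed as
- // 1.5 * 2^19 = 786432.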
- disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
- disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
- disp_dlg_regs->dlg_vblank_end = l->interlaced ? (l->vblank_end / 2) : l->vblank_end; // 15 bits
-
- l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
-
- l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
- disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
-
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
-
- l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
-
- l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_row_vblank = mode_lib->mp.dst_y_per_row_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
-
- l->max_dst_y_per_vm_vblank = 32.0; //U5.2
- l->max_dst_y_per_row_vblank = 16.0; //U4.2
-
- // magic: relax the dst_y_per_vm/row_vblank limits for very short lines (htotal <= 75)
- if (l->htotal <= 75) {
- l->max_dst_y_per_vm_vblank = 100.0;
- l->max_dst_y_per_row_vblank = 100.0;
- }
-
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
-
- DML2_ASSERT(l->dst_y_per_vm_vblank < l->max_dst_y_per_vm_vblank);
- DML2_ASSERT(l->dst_y_per_row_vblank < l->max_dst_y_per_row_vblank);
- if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
- }
-
- l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
- l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
-
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
-
- // Active
- l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
-
- l->refcyc_per_line_delivery_pre_c = 0.0;
- l->refcyc_per_line_delivery_c = 0.0;
-
- if (l->dual_plane) {
- l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
- }
-
- disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->dmdata_dl_delta = (unsigned int)(mode_lib->mp.Tdmdl[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
-
- l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
-
- l->refcyc_per_req_delivery_pre_c = 0.0;
- l->refcyc_per_req_delivery_c = 0.0;
- if (l->dual_plane) {
- l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
- }
-
- // TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
-
- // Assign to register structures
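- // The dst_y_* register fields carry 2 fractional bits (see the U5.2/U4.2 limits above), hence
- // the multiplication by 2^2 before truncating to an unsigned register value.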
- disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
-
- disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
- disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
- disp_dlg_regs->dst_y_prefetch = (unsigned int)(l->dst_y_prefetch * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int)(l->dst_y_per_vm_vblank * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_vblank = (unsigned int)(l->dst_y_per_row_vblank * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_flip = (unsigned int)(l->dst_y_per_vm_flip * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_flip = (unsigned int)(l->dst_y_per_row_flip * math_pow(2, 2));
-
- disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
- disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
-
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
-
- disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->refcyc_per_vm_req_vblank = (unsigned int)(mode_lib->mp.TimePerVMRequestVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz * math_pow(2, 10));
- disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(mode_lib->mp.TimePerVMRequestFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz * math_pow(2, 10));
-
- l->dst_y_per_pte_row_nom_l = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_pte_row_nom_c = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C[mode_lib->mp.pipe_plane[pipe_idx]];
- l->refcyc_per_pte_group_nom_l = mode_lib->mp.time_per_pte_group_nom_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_nom_c = mode_lib->mp.time_per_pte_group_nom_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_vblank_l = mode_lib->mp.time_per_pte_group_vblank_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_vblank_c = mode_lib->mp.time_per_pte_group_vblank_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_flip_l = mode_lib->mp.time_per_pte_group_flip_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_flip_c = mode_lib->mp.time_per_pte_group_flip_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_tdlut_group = mode_lib->mp.time_per_tdlut_group[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int)(l->dst_y_per_pte_row_nom_l * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int)(l->dst_y_per_pte_row_nom_c * math_pow(2, 2));
-
- disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)(l->refcyc_per_pte_group_nom_l);
- disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(l->refcyc_per_pte_group_nom_c);
- disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int)(l->refcyc_per_pte_group_vblank_l);
- disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int)(l->refcyc_per_pte_group_vblank_c);
- disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int)(l->refcyc_per_pte_group_flip_l);
- disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int)(l->refcyc_per_pte_group_flip_c);
- disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int)math_floor2(l->refcyc_per_line_delivery_pre_l, 1);
- disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int)math_floor2(l->refcyc_per_line_delivery_l, 1);
- disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int)math_floor2(l->refcyc_per_line_delivery_pre_c, 1);
- disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int)math_floor2(l->refcyc_per_line_delivery_c, 1);
-
- l->dst_y_per_meta_row_nom_l = mode_lib->mp.DST_Y_PER_META_ROW_NOM_L[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_meta_row_nom_c = mode_lib->mp.DST_Y_PER_META_ROW_NOM_C[mode_lib->mp.pipe_plane[pipe_idx]];
- l->refcyc_per_meta_chunk_nom_l = mode_lib->mp.TimePerMetaChunkNominal[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_nom_c = mode_lib->mp.TimePerChromaMetaChunkNominal[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_vblank_l = mode_lib->mp.TimePerMetaChunkVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_vblank_c = mode_lib->mp.TimePerChromaMetaChunkVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_flip_l = mode_lib->mp.TimePerMetaChunkFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_flip_c = mode_lib->mp.TimePerChromaMetaChunkFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int)(l->dst_y_per_meta_row_nom_l * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_meta_row_nom_c = (unsigned int)(l->dst_y_per_meta_row_nom_c * math_pow(2, 2));
- disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int)(l->refcyc_per_meta_chunk_nom_l);
- disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (unsigned int)(l->refcyc_per_meta_chunk_nom_c);
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (unsigned int)(l->refcyc_per_meta_chunk_vblank_l);
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = (unsigned int)(l->refcyc_per_meta_chunk_vblank_c);
- disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int)(l->refcyc_per_meta_chunk_flip_l);
- disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int)(l->refcyc_per_meta_chunk_flip_c);
-
- disp_dlg_regs->refcyc_per_tdlut_group = (unsigned int)(l->refcyc_per_tdlut_group);
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
-
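- // Request delivery times are programmed with 10 fractional bits (value * 2^10); the asserts in
- // the range-check section below bound the unscaled values to under 2^13.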
- disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int)(l->refcyc_per_req_delivery_pre_l * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int)(l->refcyc_per_req_delivery_l * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int)(l->refcyc_per_req_delivery_pre_c * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int)(l->refcyc_per_req_delivery_c * math_pow(2, 10));
- disp_ttu_regs->qos_level_low_wm = 0;
-
- disp_ttu_regs->qos_level_high_wm = (unsigned int)(4.0 * (double)l->htotal * l->ref_freq_to_pix_freq);
-
- disp_ttu_regs->qos_level_flip = 14;
- disp_ttu_regs->qos_level_fixed_l = 8;
- disp_ttu_regs->qos_level_fixed_c = 8;
- disp_ttu_regs->qos_ramp_disable_l = 0;
- disp_ttu_regs->qos_ramp_disable_c = 0;
- disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
-
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
- if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_req_vblank = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
-
-
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
-
- if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
- disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
- }
- if (l->dual_plane) {
- if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
- disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
- }
- }
-
- if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)(math_pow(2, 23) - 1);
- if (l->dual_plane) {
- if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
- }
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
- if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
- }
-
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
-
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
-
- }
-}
-
-static void rq_dlg_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param)
-{
- arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs;
- arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this is set to max
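- // SDPIF request rate limit: three quarters of the per-channel word rate summed over all DRAM
- // channels, with a floor of 96 applied on the next line.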
- arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4;
- arb_param->sdpif_request_rate_limit = arb_param->sdpif_request_rate_limit < 96 ? 96 : arb_param->sdpif_request_rate_limit;
- arb_param->sat_level_us = 60;
- arb_param->hvm_max_qos_commit_threshold = 0xf;
- arb_param->hvm_min_req_outstand_commit_threshold = 0xa;
- arb_param->compbuf_reserved_space_kbytes = mode_lib->mp.compbuf_reserved_space_64b * 64 / 1024;
- arb_param->allow_sdpif_rate_limit_when_cstate_req = mode_lib->mp.hw_debug5;
- arb_param->dcfclk_deep_sleep_hysteresis = mode_lib->mp.dcfclk_deep_sleep_hysteresis;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
-#endif
-
-}
-
-void dml2_core_shared_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out)
-{
- rq_dlg_get_wm_regs(display_cfg, mode_lib, out);
-}
-
-void dml2_core_shared_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out)
-{
- rq_dlg_get_arb_params(mode_lib, out);
-}
-
-void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
- struct dml2_core_internal_display_mode_lib *mode_lib,
- struct dml2_dchub_per_pipe_register_set *out, int pipe_index)
-{
- rq_dlg_get_rq_reg(&out->rq_regs, display_cfg, mode_lib, pipe_index);
- rq_dlg_get_dlg_reg(&mode_lib->scratch, &out->dlg_regs, &out->ttu_regs, display_cfg, mode_lib, pipe_index);
- out->det_size = mode_lib->mp.DETBufferSizeInKByte[mode_lib->mp.pipe_plane[pipe_index]] / mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
-}
-
-void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
-{
- // out->min_clocks.dcn4x.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
- // out->min_clocks.dcn4x.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
- // out->min_clocks.dcn4x.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
-
- out->global_sync.dcn4x.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
-}
-
-void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx)
-{
- unsigned int n;
-
- out->num_mcaches_plane0 = mode_lib->ms.num_mcaches_l[plane_idx];
- out->num_mcaches_plane1 = mode_lib->ms.num_mcaches_c[plane_idx];
- out->shift_granularity.p0 = mode_lib->ms.mcache_shift_granularity_l[plane_idx];
- out->shift_granularity.p1 = mode_lib->ms.mcache_shift_granularity_c[plane_idx];
-
- for (n = 0; n < out->num_mcaches_plane0; n++)
- out->mcache_x_offsets_plane0[n] = mode_lib->ms.mcache_offsets_l[plane_idx][n];
-
- for (n = 0; n < out->num_mcaches_plane1; n++)
- out->mcache_x_offsets_plane1[n] = mode_lib->ms.mcache_offsets_c[plane_idx][n];
-
- out->last_slice_sharing.mall_comb_mcache_p0 = mode_lib->ms.mall_comb_mcache_l[plane_idx];
- out->last_slice_sharing.mall_comb_mcache_p1 = mode_lib->ms.mall_comb_mcache_c[plane_idx];
- out->last_slice_sharing.plane0_plane1 = mode_lib->ms.lc_comb_mcache[plane_idx];
- out->informative.meta_row_bytes_plane0 = mode_lib->ms.mcache_row_bytes_l[plane_idx];
- out->informative.meta_row_bytes_plane1 = mode_lib->ms.mcache_row_bytes_c[plane_idx];
-
- out->valid = true;
-}
-
-void dml2_core_shared_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index)
-{
- *out = mode_lib->mp.SurfaceSizeInTheMALL[mode_lib->mp.pipe_plane[pipe_index]];
-}
-
-void dml2_core_shared_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_idx)
-{
- out->mall_svp_size_requirement_ways = 0;
-
- out->nominal_vblank_pstate_latency_hiding_us =
- (int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_idx].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_idx].stream_index].timing.pixel_clock_khz / 1000) * mode_lib->ms.TWait[plane_idx]);
-
- out->dram_change_latency_hiding_margin_in_active = (int)mode_lib->ms.VActiveLatencyHidingMargin[plane_idx];
-
- out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
-}
-
-void dml2_core_shared_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
-{
- double phantom_processing_delay_pix;
- unsigned int phantom_processing_delay_lines;
- unsigned int phantom_v_active_lines;
- unsigned int phantom_v_startup_lines;
- unsigned int phantom_v_blank_lines;
- unsigned int main_v_blank_lines;
- unsigned int rem;
-
- phantom_processing_delay_pix = (double)((mode_lib->ip.subvp_fw_processing_delay_us + mode_lib->ip.subvp_pstate_allow_width_us) *
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.pixel_clock_khz / 1000));
- phantom_processing_delay_lines = (unsigned int)(phantom_processing_delay_pix / (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total);
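- // Round the phantom processing delay up to a whole number of lines.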
- dml2_core_shared_div_rem(phantom_processing_delay_pix, display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total, &rem);
- if (rem)
- phantom_processing_delay_lines++;
-
- phantom_v_startup_lines = mode_lib->ms.MaxVStartupLines[plane_index];
- phantom_v_active_lines = phantom_processing_delay_lines + mode_lib->ms.SubViewportLinesNeededInMALL[plane_index] + mode_lib->ip.subvp_swath_height_margin_lines;
-
- // phantom_vblank = vbp(vstartup) + vfp(always 1) + vsync(can be 1), capped at the main stream's vblank
- phantom_v_blank_lines = phantom_v_startup_lines + 1 + 1;
- main_v_blank_lines = display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.v_active;
- if (phantom_v_blank_lines > main_v_blank_lines)
- phantom_v_blank_lines = main_v_blank_lines;
-
- out->phantom_v_active = phantom_v_active_lines;
- // phantom_vtotal = vactive + vblank
- out->phantom_v_total = phantom_v_active_lines + phantom_v_blank_lines;
-
- out->phantom_min_v_active = mode_lib->ms.SubViewportLinesNeededInMALL[plane_index];
- out->phantom_v_startup = mode_lib->ms.MaxVStartupLines[plane_index];
-
- out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
-#endif
-}
-
-void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out)
-{
- unsigned int k, n;
-
- out->informative.mode_support_info.ModeIsSupported = mode_lib->ms.support.ModeSupport;
- out->informative.mode_support_info.ImmediateFlipSupport = mode_lib->ms.support.ImmediateFlipSupport;
- out->informative.mode_support_info.WritebackLatencySupport = mode_lib->ms.support.WritebackLatencySupport;
- out->informative.mode_support_info.ScaleRatioAndTapsSupport = mode_lib->ms.support.ScaleRatioAndTapsSupport;
- out->informative.mode_support_info.SourceFormatPixelAndScanSupport = mode_lib->ms.support.SourceFormatPixelAndScanSupport;
- out->informative.mode_support_info.P2IWith420 = mode_lib->ms.support.P2IWith420;
- out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP;
- out->informative.mode_support_info.DSC422NativeNotSupported = mode_lib->ms.support.DSC422NativeNotSupported;
- out->informative.mode_support_info.LinkRateDoesNotMatchDPVersion = mode_lib->ms.support.LinkRateDoesNotMatchDPVersion;
- out->informative.mode_support_info.LinkRateForMultistreamNotIndicated = mode_lib->ms.support.LinkRateForMultistreamNotIndicated;
- out->informative.mode_support_info.BPPForMultistreamNotIndicated = mode_lib->ms.support.BPPForMultistreamNotIndicated;
- out->informative.mode_support_info.MultistreamWithHDMIOreDP = mode_lib->ms.support.MultistreamWithHDMIOreDP;
- out->informative.mode_support_info.MSOOrODMSplitWithNonDPLink = mode_lib->ms.support.MSOOrODMSplitWithNonDPLink;
- out->informative.mode_support_info.NotEnoughLanesForMSO = mode_lib->ms.support.NotEnoughLanesForMSO;
- out->informative.mode_support_info.NumberOfOTGSupport = mode_lib->ms.support.NumberOfOTGSupport;
- out->informative.mode_support_info.NumberOfHDMIFRLSupport = mode_lib->ms.support.NumberOfHDMIFRLSupport;
- out->informative.mode_support_info.NumberOfDP2p0Support = mode_lib->ms.support.NumberOfDP2p0Support;
- out->informative.mode_support_info.WritebackScaleRatioAndTapsSupport = mode_lib->ms.support.WritebackScaleRatioAndTapsSupport;
- out->informative.mode_support_info.CursorSupport = mode_lib->ms.support.CursorSupport;
- out->informative.mode_support_info.PitchSupport = mode_lib->ms.support.PitchSupport;
- out->informative.mode_support_info.ViewportExceedsSurface = mode_lib->ms.support.ViewportExceedsSurface;
- out->informative.mode_support_info.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified = false;
- out->informative.mode_support_info.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe;
- out->informative.mode_support_info.InvalidCombinationOfMALLUseForPStateAndStaticScreen = mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen;
- out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
- out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
- out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
-
- out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
- out->informative.mode_support_info.NotEnoughDSCUnits = mode_lib->ms.support.NotEnoughDSCUnits;
- out->informative.mode_support_info.NotEnoughDSCSlices = mode_lib->ms.support.NotEnoughDSCSlices;
- out->informative.mode_support_info.PixelsPerLinePerDSCUnitSupport = mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport;
- out->informative.mode_support_info.DSCCLKRequiredMoreThanSupported = mode_lib->ms.support.DSCCLKRequiredMoreThanSupported;
- out->informative.mode_support_info.DTBCLKRequiredMoreThanSupported = mode_lib->ms.support.DTBCLKRequiredMoreThanSupported;
- out->informative.mode_support_info.LinkCapacitySupport = mode_lib->ms.support.LinkCapacitySupport;
-
- out->informative.mode_support_info.ROBSupport = mode_lib->ms.support.ROBSupport;
- out->informative.mode_support_info.OutstandingRequestsSupport = mode_lib->ms.support.OutstandingRequestsSupport;
- out->informative.mode_support_info.OutstandingRequestsUrgencyAvoidance = mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance;
- out->informative.mode_support_info.PTEBufferSizeNotExceeded = mode_lib->ms.support.PTEBufferSizeNotExceeded;
- out->informative.mode_support_info.DCCMetaBufferSizeNotExceeded = mode_lib->ms.support.DCCMetaBufferSizeNotExceeded;
-
- out->informative.mode_support_info.TotalVerticalActiveBandwidthSupport = mode_lib->ms.support.AvgBandwidthSupport;
- out->informative.mode_support_info.VActiveBandwidthSupport = mode_lib->ms.support.UrgVactiveBandwidthSupport;
- out->informative.mode_support_info.USRRetrainingSupport = mode_lib->ms.support.USRRetrainingSupport;
-
- out->informative.mode_support_info.PrefetchSupported = mode_lib->ms.support.PrefetchSupported;
- out->informative.mode_support_info.DynamicMetadataSupported = mode_lib->ms.support.DynamicMetadataSupported;
- out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
- out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
- out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
- out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
-
- for (k = 0; k < out->display_config.num_planes; k++) {
-
- out->informative.mode_support_info.FCLKChangeSupport[k] = mode_lib->ms.support.FCLKChangeSupport[k];
- out->informative.mode_support_info.MPCCombineEnable[k] = mode_lib->ms.support.MPCCombineEnable[k];
- out->informative.mode_support_info.ODMMode[k] = mode_lib->ms.support.ODMMode[k];
- out->informative.mode_support_info.DPPPerSurface[k] = mode_lib->ms.support.DPPPerSurface[k];
- out->informative.mode_support_info.DSCEnabled[k] = mode_lib->ms.support.DSCEnabled[k];
- out->informative.mode_support_info.FECEnabled[k] = mode_lib->ms.support.FECEnabled[k];
- out->informative.mode_support_info.NumberOfDSCSlices[k] = mode_lib->ms.support.NumberOfDSCSlices[k];
- out->informative.mode_support_info.OutputBpp[k] = mode_lib->ms.support.OutputBpp[k];
-
- if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_unknown)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_unknown;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_dp)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_dp;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_edp)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_edp;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_dp2p0)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_dp2p0;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_hdmi)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_hdmi;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_hdmifrl)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_hdmifrl;
-
- if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_unknown)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_unknown;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr2)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr2;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr10)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr10;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr13p5)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr13p5;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr20)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr20;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_3x3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_3x3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_6x3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_6x3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_6x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_6x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_8x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_8x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_10x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
-
- out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
- out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
- }
-
- out->informative.watermarks.urgent_us = mode_lib->mp.Watermark.UrgentWatermark;
- out->informative.watermarks.writeback_urgent_us = mode_lib->mp.Watermark.WritebackUrgentWatermark;
- out->informative.watermarks.writeback_pstate_us = mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark;
- out->informative.watermarks.writeback_fclk_pstate_us = mode_lib->mp.Watermark.WritebackFCLKChangeWatermark;
-
- out->informative.watermarks.cstate_exit_us = mode_lib->mp.Watermark.StutterExitWatermark;
- out->informative.watermarks.cstate_enter_plus_exit_us = mode_lib->mp.Watermark.StutterEnterPlusExitWatermark;
- out->informative.watermarks.z8_cstate_exit_us = mode_lib->mp.Watermark.Z8StutterExitWatermark;
- out->informative.watermarks.z8_cstate_enter_plus_exit_us = mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark;
- out->informative.watermarks.pstate_change_us = mode_lib->mp.Watermark.DRAMClockChangeWatermark;
- out->informative.watermarks.fclk_pstate_change_us = mode_lib->mp.Watermark.FCLKChangeWatermark;
- out->informative.watermarks.usr_retraining_us = mode_lib->mp.Watermark.USRRetrainingWatermark;
-
- out->informative.mall.total_surface_size_in_mall_bytes = 0;
- for (k = 0; k < out->display_config.num_planes; ++k)
- out->informative.mall.total_surface_size_in_mall_bytes += mode_lib->mp.SurfaceSizeInTheMALL[k];
-
- out->informative.qos.min_return_latency_in_dcfclk = mode_lib->mp.min_return_latency_in_dcfclk;
- out->informative.qos.urgent_latency_us = mode_lib->mp.UrgentLatency;
-
- out->informative.qos.max_urgent_latency_us = mode_lib->ms.support.max_urgent_latency_us;
- out->informative.qos.avg_non_urgent_latency_us = mode_lib->ms.support.avg_non_urgent_latency_us;
- out->informative.qos.avg_urgent_latency_us = mode_lib->ms.support.avg_urgent_latency_us;
-
- out->informative.qos.wm_memory_trip_us = mode_lib->mp.UrgentLatency;
- out->informative.qos.meta_trip_memory_us = mode_lib->mp.MetaTripToMemory;
- out->informative.qos.fraction_of_urgent_bandwidth = mode_lib->mp.FractionOfUrgentBandwidth;
- out->informative.qos.fraction_of_urgent_bandwidth_immediate_flip = mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip;
- out->informative.qos.fraction_of_urgent_bandwidth_mall = mode_lib->mp.FractionOfUrgentBandwidthMALL;
-
- out->informative.qos.avg_bw_required.sys_active.sdp_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_required.sys_active.dram_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.avg_bw_required.svp_prefetch.sdp_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_required.svp_prefetch.dram_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.avg_bw_available.sys_active.sdp_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_available.sys_active.dram_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.avg_bw_available.svp_prefetch.sdp_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_available.svp_prefetch.dram_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.urg_bw_available.sys_active.sdp_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_available.sys_active.dram_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_available.sys_active.dram_vm_only_bw_mbps =
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active];
-
- out->informative.qos.urg_bw_available.svp_prefetch.sdp_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_available.svp_prefetch.dram_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_available.svp_prefetch.dram_vm_only_bw_mbps =
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_svp_prefetch];
-
- out->informative.qos.urg_bw_required.sys_active.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required.sys_active.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_required.svp_prefetch.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required.svp_prefetch.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.non_urg_bw_required.sys_active.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required.sys_active.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.non_urg_bw_required.svp_prefetch.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required.svp_prefetch.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.urg_bw_required_with_flip.sys_active.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required_with_flip.sys_active.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_required_with_flip.svp_prefetch.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required_with_flip.svp_prefetch.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.non_urg_bw_required_with_flip.sys_active.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required_with_flip.sys_active.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.non_urg_bw_required_with_flip.svp_prefetch.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required_with_flip.svp_prefetch.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.crb.comp_buffer_size_kbytes = mode_lib->mp.CompressedBufferSizeInkByte;
- out->informative.crb.UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
-
- out->informative.crb.compbuf_reserved_space_64b = mode_lib->mp.compbuf_reserved_space_64b;
- out->informative.misc.hw_debug5 = mode_lib->mp.hw_debug5;
- out->informative.misc.dcfclk_deep_sleep_hysteresis = mode_lib->mp.dcfclk_deep_sleep_hysteresis;
-
- out->informative.power_management.stutter_efficiency = mode_lib->mp.StutterEfficiencyNotIncludingVBlank;
- out->informative.power_management.stutter_efficiency_with_vblank = mode_lib->mp.StutterEfficiency;
- out->informative.power_management.stutter_num_bursts = mode_lib->mp.NumberOfStutterBurstsPerFrame;
-
- out->informative.power_management.z8.stutter_efficiency = mode_lib->mp.Z8StutterEfficiency;
- out->informative.power_management.z8.stutter_efficiency_with_vblank = mode_lib->mp.StutterEfficiency;
- out->informative.power_management.z8.stutter_num_bursts = mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- out->informative.power_management.z8.stutter_period = mode_lib->mp.StutterPeriod;
-
- out->informative.power_management.z8.bestcase.stutter_efficiency = mode_lib->mp.Z8StutterEfficiencyBestCase;
- out->informative.power_management.z8.bestcase.stutter_num_bursts = mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase;
- out->informative.power_management.z8.bestcase.stutter_period = mode_lib->mp.StutterPeriodBestCase;
-
- out->informative.misc.cstate_max_cap_mode = mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
-
- out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0);
-
- out->informative.qos.max_active_fclk_change_latency_supported = mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
-
- for (k = 0; k < out->display_config.num_planes; k++) {
-
- if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.fclk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us))
- out->informative.misc.PrefetchMode[k] = 0;
- else if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.fclk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us))
- out->informative.misc.PrefetchMode[k] = 1;
- else if (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us)
- out->informative.misc.PrefetchMode[k] = 2;
- else
- out->informative.misc.PrefetchMode[k] = 3;
-
- out->informative.misc.min_ttu_vblank_us[k] = mode_lib->mp.MinTTUVBlank[k];
- out->informative.mall.subviewport_lines_needed_in_mall[k] = mode_lib->mp.SubViewportLinesNeededInMALL[k];
- out->informative.crb.det_size_in_kbytes[k] = mode_lib->mp.DETBufferSizeInKByte[k];
- out->informative.crb.DETBufferSizeY[k] = mode_lib->mp.DETBufferSizeY[k];
- out->informative.misc.ImmediateFlipSupportedForPipe[k] = mode_lib->mp.ImmediateFlipSupportedForPipe[k];
- out->informative.misc.UsesMALLForStaticScreen[k] = mode_lib->mp.is_using_mall_for_ss[k];
- out->informative.plane_info[k].dpte_row_height_plane0 = mode_lib->mp.dpte_row_height[k];
- out->informative.plane_info[k].dpte_row_height_plane1 = mode_lib->mp.dpte_row_height_chroma[k];
- out->informative.plane_info[k].meta_row_height_plane0 = mode_lib->mp.meta_row_height[k];
- out->informative.plane_info[k].meta_row_height_plane1 = mode_lib->mp.meta_row_height_chroma[k];
- out->informative.dcc_control[k].max_uncompressed_block_plane0 = mode_lib->mp.DCCYMaxUncompressedBlock[k];
- out->informative.dcc_control[k].max_compressed_block_plane0 = mode_lib->mp.DCCYMaxCompressedBlock[k];
- out->informative.dcc_control[k].independent_block_plane0 = mode_lib->mp.DCCYIndependentBlock[k];
- out->informative.dcc_control[k].max_uncompressed_block_plane1 = mode_lib->mp.DCCCMaxUncompressedBlock[k];
- out->informative.dcc_control[k].max_compressed_block_plane1 = mode_lib->mp.DCCCMaxCompressedBlock[k];
- out->informative.dcc_control[k].independent_block_plane1 = mode_lib->mp.DCCCIndependentBlock[k];
- out->informative.misc.dst_x_after_scaler[k] = mode_lib->mp.DSTXAfterScaler[k];
- out->informative.misc.dst_y_after_scaler[k] = mode_lib->mp.DSTYAfterScaler[k];
- out->informative.misc.prefetch_source_lines_plane0[k] = mode_lib->mp.PrefetchSourceLinesY[k];
- out->informative.misc.prefetch_source_lines_plane1[k] = mode_lib->mp.PrefetchSourceLinesC[k];
- out->informative.misc.vready_at_or_after_vsync[k] = mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k];
- out->informative.misc.min_dst_y_next_start[k] = mode_lib->mp.MIN_DST_Y_NEXT_START[k];
- out->informative.plane_info[k].swath_width_plane0 = mode_lib->mp.SwathWidthY[k];
- out->informative.plane_info[k].swath_height_plane0 = mode_lib->mp.SwathHeightY[k];
- out->informative.plane_info[k].swath_height_plane1 = mode_lib->mp.SwathHeightC[k];
- out->informative.misc.CursorDstXOffset[k] = mode_lib->mp.CursorDstXOffset[k];
- out->informative.misc.CursorDstYOffset[k] = mode_lib->mp.CursorDstYOffset[k];
- out->informative.misc.CursorChunkHDLAdjust[k] = mode_lib->mp.CursorChunkHDLAdjust[k];
- out->informative.misc.dpte_group_bytes[k] = mode_lib->mp.dpte_group_bytes[k];
- out->informative.misc.vm_group_bytes[k] = mode_lib->mp.vm_group_bytes[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeLuma[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeChroma[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[k];
- out->informative.misc.TimePerVMGroupVBlank[k] = mode_lib->mp.TimePerVMGroupVBlank[k];
- out->informative.misc.TimePerVMGroupFlip[k] = mode_lib->mp.TimePerVMGroupFlip[k];
- out->informative.misc.TimePerVMRequestVBlank[k] = mode_lib->mp.TimePerVMRequestVBlank[k];
- out->informative.misc.TimePerVMRequestFlip[k] = mode_lib->mp.TimePerVMRequestFlip[k];
- out->informative.misc.Tdmdl_vm[k] = mode_lib->mp.Tdmdl_vm[k];
- out->informative.misc.Tdmdl[k] = mode_lib->mp.Tdmdl[k];
- out->informative.misc.VStartup[k] = mode_lib->mp.VStartup[k];
- out->informative.misc.VUpdateOffsetPix[k] = mode_lib->mp.VUpdateOffsetPix[k];
- out->informative.misc.VUpdateWidthPix[k] = mode_lib->mp.VUpdateWidthPix[k];
- out->informative.misc.VReadyOffsetPix[k] = mode_lib->mp.VReadyOffsetPix[k];
-
- out->informative.misc.DST_Y_PER_PTE_ROW_NOM_L[k] = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L[k];
- out->informative.misc.DST_Y_PER_PTE_ROW_NOM_C[k] = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C[k];
- out->informative.misc.time_per_pte_group_nom_luma[k] = mode_lib->mp.time_per_pte_group_nom_luma[k];
- out->informative.misc.time_per_pte_group_nom_chroma[k] = mode_lib->mp.time_per_pte_group_nom_chroma[k];
- out->informative.misc.time_per_pte_group_vblank_luma[k] = mode_lib->mp.time_per_pte_group_vblank_luma[k];
- out->informative.misc.time_per_pte_group_vblank_chroma[k] = mode_lib->mp.time_per_pte_group_vblank_chroma[k];
- out->informative.misc.time_per_pte_group_flip_luma[k] = mode_lib->mp.time_per_pte_group_flip_luma[k];
- out->informative.misc.time_per_pte_group_flip_chroma[k] = mode_lib->mp.time_per_pte_group_flip_chroma[k];
- out->informative.misc.VRatioPrefetchY[k] = mode_lib->mp.VRatioPrefetchY[k];
- out->informative.misc.VRatioPrefetchC[k] = mode_lib->mp.VRatioPrefetchC[k];
- out->informative.misc.DestinationLinesForPrefetch[k] = mode_lib->mp.dst_y_prefetch[k];
- out->informative.misc.DestinationLinesToRequestVMInVBlank[k] = mode_lib->mp.dst_y_per_vm_vblank[k];
- out->informative.misc.DestinationLinesToRequestRowInVBlank[k] = mode_lib->mp.dst_y_per_row_vblank[k];
- out->informative.misc.DestinationLinesToRequestVMInImmediateFlip[k] = mode_lib->mp.dst_y_per_vm_flip[k];
- out->informative.misc.DestinationLinesToRequestRowInImmediateFlip[k] = mode_lib->mp.dst_y_per_row_flip[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeLuma[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeChroma[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
-
- out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
- out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
- out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
- out->informative.misc.BIGK_FRAGMENT_SIZE[k] = mode_lib->mp.BIGK_FRAGMENT_SIZE[k];
- out->informative.misc.PTE_BUFFER_MODE[k] = mode_lib->mp.PTE_BUFFER_MODE[k];
- out->informative.misc.DSCDelay[k] = mode_lib->mp.DSCDelay[k];
- out->informative.misc.MaxActiveDRAMClockChangeLatencySupported[k] = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported[k];
- }
-
- // For this DV informative layer, all pipes in the same planes will just use the same id
- // will have the optimization and helper layer later on
- // only work when we can have high "mcache" that fit everything without thrashing the cache
- for (k = 0; k < out->display_config.num_planes; k++) {
- out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane0 = mode_lib->ms.num_mcaches_l[k];
- out->informative.non_optimized_mcache_allocation[k].informative.meta_row_bytes_plane0 = mode_lib->ms.mcache_row_bytes_l[k];
-
- for (n = 0; n < out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane0; n++) {
- out->informative.non_optimized_mcache_allocation[k].mcache_x_offsets_plane0[n] = mode_lib->ms.mcache_offsets_l[k][n];
- out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane0[n] = k;
- }
-
- out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane1 = mode_lib->ms.num_mcaches_c[k];
- out->informative.non_optimized_mcache_allocation[k].informative.meta_row_bytes_plane1 = mode_lib->ms.mcache_row_bytes_c[k];
-
- for (n = 0; n < out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane1; n++) {
- out->informative.non_optimized_mcache_allocation[k].mcache_x_offsets_plane1[n] = mode_lib->ms.mcache_offsets_c[k][n];
- out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane1[n] = k;
- }
- }
-
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
- out->informative.misc.ROBUrgencyAvoidance = true;
- } else {
- out->informative.misc.ROBUrgencyAvoidance = false;
- }
- } else {
- out->informative.misc.ROBUrgencyAvoidance = true;
- }
-}
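The max_non_urgent_latency_us expression removed just above combines three terms: a UCLK-cycle latency scaled by the UMC latency margin, a MALL overhead in fabric-clock cycles, and a round-trip fabric term scaled by the fabric transport margin. The standalone sketch below is not part of the patch; all numbers are hypothetical and simply show that cycles divided by a clock in MHz yields microseconds.

/* Hypothetical inputs; real values come from the SoC bounding box and QoS params. */
#include <stdio.h>

int main(void)
{
	double max_latency_non_urgent_uclk_cycles = 300.0;
	double uclk_freq_mhz = 1000.0;
	double umc_max_latency_margin_pct = 10.0;
	double mall_overhead_fclk_cycles = 50.0;
	double max_round_trip_fclk_cycles = 400.0;
	double fabric_clock_mhz = 1200.0;
	double fabric_max_transport_latency_margin_pct = 5.0;

	double max_non_urgent_latency_us =
		max_latency_non_urgent_uclk_cycles / uclk_freq_mhz
			* (1.0 + umc_max_latency_margin_pct / 100.0)
		+ mall_overhead_fclk_cycles / fabric_clock_mhz
		+ max_round_trip_fclk_cycles / fabric_clock_mhz
			* (1.0 + fabric_max_transport_latency_margin_pct / 100.0);

	/* 0.330 + 0.042 + 0.350 = 0.722 us with the numbers above */
	printf("max_non_urgent_latency_us = %.3f\n", max_non_urgent_latency_us);
	return 0;
}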
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
deleted file mode 100644
index db0a30fdb58d..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
deleted file mode 100644
index f9f8869cd8b8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_internal_shared_types.h"
-#include "dml_top.h"
-#include "dml2_mcg_factory.h"
-#include "dml2_core_factory.h"
-#include "dml2_dpmm_factory.h"
-#include "dml2_pmo_factory.h"
-#include "dml_top_mcache.h"
-#include "dml2_top_optimization.h"
-#include "dml2_external_lib_deps.h"
-
-unsigned int dml2_get_instance_size_bytes(void)
-{
- return sizeof(struct dml2_instance);
-}
-
-bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
- struct dml2_core_initialize_in_out core_init_params = { 0 };
- struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
- struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
- bool result = false;
-
- memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
- memset(dml, 0, sizeof(struct dml2_instance));
-
- memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
- memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
-
- dml->project_id = in_out->options.project_id;
- dml->pmo_options = in_out->options.pmo_options;
-
- // Initialize All Components
- result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
-
- if (result)
- result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
-
- if (result)
- result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
-
- if (result) {
- mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
- mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
- result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
- }
-
- if (result) {
- core_init_params.project_id = in_out->options.project_id;
- core_init_params.instance = &dml->core_instance;
- core_init_params.minimum_clock_table = &dml->min_clk_table;
- core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
- core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
- core_init_params.ip_caps = &in_out->ip_caps;
- core_init_params.soc_bb = &in_out->soc_bb;
- result = dml->core_instance.initialize(&core_init_params);
-
- if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
- memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
- }
- }
-
- if (result)
- result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
-
- if (result) {
- pmo_init_params.instance = &dml->pmo_instance;
- pmo_init_params.soc_bb = &dml->soc_bbox;
- pmo_init_params.ip_caps = &dml->ip_caps;
- pmo_init_params.mcg_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
- pmo_init_params.options = &dml->pmo_options;
- dml->pmo_instance.initialize(&pmo_init_params);
- }
-
- return result;
-}
-
-static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
-{
- memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
- out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1; //dml->min_clk_table.clean_me_up.soc_bb.num_states - 1;
-}
-
-static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
-{
- memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
- out->stage1.min_clk_index_for_latency = 0;
-}
-
-bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
- struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
-
- bool result = false;
- bool mcache_success = false;
-
- memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
-
- setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (result) {
- struct optimization_phase_params mcache_phase = {
- .dml = dml,
- .display_config = &l->base_display_config_with_meta,
- .test_function = dml2_top_optimization_test_function_mcache,
- .optimize_function = dml2_top_optimization_optimize_function_mcache,
- .optimized_display_config = &l->optimized_display_config_with_meta,
- .all_or_nothing = false,
- };
- mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
- }
-
- /*
- * Call DPMM to map all requirements to minimum clock state
- */
- if (result) {
- l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
- l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_mode_params.programming = dpmm_programming;
- l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
- l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
- result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
- }
-
- in_out->is_supported = mcache_success;
- result = result && in_out->is_supported;
-
- return result;
-}
-
-bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;
-
- bool result = false;
- bool mcache_success = false;
- bool uclk_pstate_success = false;
- bool vmin_success = false;
- bool stutter_success = false;
- unsigned int i;
-
- memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
- memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
-
- memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));
-
- setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (!result) {
- setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (!result) {
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = false;
- dml->core_instance.populate_informative(&l->informative_params);
-
- return false;
- }
-
- /*
- * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
- */
- memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
- l->min_clock_for_latency_phase.dml = dml;
- l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
- l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->min_clock_for_latency_phase.all_or_nothing = false;
-
- dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
-
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- }
-
- /*
- * Phase 2: Satisfy DCC mcache requirements
- */
- memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
- l->mcache_phase.dml = dml;
- l->mcache_phase.display_config = &l->base_display_config_with_meta;
- l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
- l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
- l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->mcache_phase.all_or_nothing = true;
-
- mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
-
- if (!mcache_success) {
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = false;
-
- dml->core_instance.populate_informative(&l->informative_params);
-
- in_out->programming->informative.failed_mcache_validation = true;
- return false;
- }
-
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
-
- /*
- * Phase 3: Optimize for Pstate
- */
- memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
- l->uclk_pstate_phase.dml = dml;
- l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
- l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
- l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
- l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
- l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->uclk_pstate_phase.all_or_nothing = true;
-
- uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
-
- if (uclk_pstate_success) {
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage3.success = true;
- }
-
- /*
- * Phase 4: Optimize for Vmin
- */
- memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
- l->vmin_phase.dml = dml;
- l->vmin_phase.display_config = &l->base_display_config_with_meta;
- l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
- l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
- l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
- l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->vmin_phase.all_or_nothing = false;
-
- vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
-
- if (l->optimized_display_config_with_meta.stage4.performed) {
- /*
- * when performed is true, optimization has applied to
- * optimized_display_config_with_meta and it has passed mode
- * support. However it may or may not pass the test function to
- * reach actual Vmin. As long as voltage is optimized even if it
- * doesn't reach Vmin level, there is still power benefit so in
- * this case we will still copy this optimization into base
- * display config.
- */
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage4.success = vmin_success;
- }
-
- /*
- * Phase 5: Optimize for Stutter
- */
- memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
- l->stutter_phase.dml = dml;
- l->stutter_phase.display_config = &l->base_display_config_with_meta;
- l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
- l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
- l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
- l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->stutter_phase.all_or_nothing = true;
-
- stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
-
- if (stutter_success) {
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage5.success = true;
- }
-
- /*
- * Populate mcache programming
- */
- for (i = 0; i < in_out->display_config->num_planes; i++) {
- in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
- }
-
- /*
- * Call DPMM to map all requirements to minimum clock state
- */
- if (result) {
- l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
- l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_mode_params.programming = in_out->programming;
- l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
- l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
- result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
- if (!result)
- in_out->programming->informative.failed_dpmm = true;
- }
-
- if (result) {
- l->mode_programming_params.instance = &dml->core_instance;
- l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
- l->mode_programming_params.programming = in_out->programming;
-
- result = dml->core_instance.mode_programming(&l->mode_programming_params);
- if (!result)
- in_out->programming->informative.failed_mode_programming = true;
- }
-
- if (result) {
- l->dppm_map_watermarks_params.core = &dml->core_instance;
- l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_watermarks_params.programming = in_out->programming;
- result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
- }
-
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = result;
-
- dml->core_instance.populate_informative(&l->informative_params);
-
- return result;
-}
-
-bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
-{
- return dml2_top_mcache_build_mcache_programming(in_out);
-}
-
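Each optimization phase in dml2_build_mode_programming() above follows the same shape: fill in an optimization_phase_params block, hand it to the phase runner, and promote the optimized config to the new baseline only on success. The reduced sketch below is not from the patch and uses hypothetical stand-in types and a stubbed runner; it only illustrates that caller-side pattern, not the internals of dml2_top_optimization_perform_optimization_phase().

struct cfg { int dummy; };		/* hypothetical stand-in for display_configuation_with_meta */

struct phase_params {			/* hypothetical, reduced phase descriptor */
	struct cfg *display_config;
	struct cfg *optimized_display_config;
	int all_or_nothing;
};

/* Stub standing in for the real phase runner; assume the phase succeeds. */
static int run_phase(const struct phase_params *p)
{
	(void)p;
	return 1;
}

static void run_one_phase(struct cfg *base, struct cfg *optimized)
{
	struct phase_params p = {
		.display_config = base,
		.optimized_display_config = optimized,
		.all_or_nothing = 1,
	};

	if (run_phase(&p))
		*base = *optimized;	/* promote the optimized config to the baseline */
}

int main(void)
{
	struct cfg base = {0}, optimized = {1};

	run_one_phase(&base, &optimized);
	return base.dummy;		/* 1: the optimized config was promoted */
}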
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
deleted file mode 100644
index f95c7ff56f15..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-int dml2_log_internal(const char *format, ...)
-{
- return 0;
-}
-
-int dml2_printf(const char *format, ...)
-{
-#ifdef _DEBUG
-#ifdef _DEBUG_PRINTS
- int result;
- va_list args;
- va_start(args, format);
-
- result = vprintf(format, args);
-
- va_end(args);
-
- return result;
-#else
- return 0;
-#endif
-#else
- return 0;
-#endif
-}
-
-void dml2_assert(int condition)
-{
- //ASSERT(condition);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
deleted file mode 100644
index a27792b56f7e..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DML2_DEBUG_H__
-#define __DML2_DEBUG_H__
-
-#ifdef _DEBUG
-#define DML2_ASSERT(condition) dml2_assert(condition)
-#else
-#define DML2_ASSERT(condition) ((void)0)
-#endif
-/*
- * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
- * DML_LOG_ERROR - unexpected but recoverable failures inside DML
- * DML_LOG_WARN - unexpected inputs or events to DML
- * DML_LOG_INFO - high level tracing of DML interfaces
- * DML_LOG_DEBUG - detailed tracing of DML internal components
- * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
- */
-#if !defined(DML_LOG_LEVEL)
-#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
-/* for backward compatibility with old macros */
-#define DML_LOG_LEVEL 5
-#else
-#define DML_LOG_LEVEL 0
-#endif
-#endif
-
-#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= 1
-#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_ERROR(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= 2
-#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_WARN(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= 3
-#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_INFO(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= 4
-#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_DEBUG(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= 5
-#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
-#endif
-
-int dml2_log_internal(const char *format, ...);
-int dml2_printf(const char *format, ...);
-void dml2_assert(int condition);
-
-#endif
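The DML_LOG_* macros removed above gate logging at compile time: levels at or below DML_LOG_LEVEL expand to dml2_log_internal(), everything above it expands to ((void)0) and vanishes from the object code. A small usage sketch (not part of the patch; the include path is assumed for illustration) with the level pinned to 2:

#define DML_LOG_LEVEL 2			/* honoured because the header only supplies a default */
#include "dml2_debug.h"

static void check_bpp(int bpp)
{
	if (bpp > 64)
		DML_LOG_WARN("unexpected bpp %d, clamping\n", bpp);	/* level 2: compiled in */

	DML_LOG_DEBUG("bpp = %d\n", bpp);				/* level 4: ((void)0) */
}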
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
new file mode 100644
index 000000000000..97e068b6bf6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT */
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+# Makefile for dml2.
+
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2056
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+
+DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
+ dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
+ dml_display_rq_dlg_calc.o
+
+AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
index e450445bc05d..b954c9648fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
@@ -53,17 +53,17 @@ typedef const void *const_pvoid;
typedef const char *const_pchar;
typedef struct rgba_struct {
- uint8 a;
- uint8 r;
- uint8 g;
- uint8 b;
+ uint8 a;
+ uint8 r;
+ uint8 g;
+ uint8 b;
} rgba_t;
typedef struct {
- uint8 blue;
- uint8 green;
- uint8 red;
- uint8 alpha;
+ uint8 blue;
+ uint8 green;
+ uint8 red;
+ uint8 alpha;
} gen_color_t;
typedef union {
@@ -87,7 +87,7 @@ typedef union {
} uintfloat64;
#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x = x
+#define UNREFERENCED_PARAMETER(x) (x = x)
#endif
#endif
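The parentheses added to UNREFERENCED_PARAMETER above matter because the macro body contains an assignment. A contrived but self-contained sketch (not part of the patch) of the expansion the unparenthesised form allows:

#define UNREFERENCED_PARAMETER_OLD(x) x = x
#define UNREFERENCED_PARAMETER_NEW(x) (x = x)

int demo(int x)
{
	int y;

	/* Old form: expands to y = x = x + 1, so x is silently modified. */
	y = UNREFERENCED_PARAMETER_OLD(x) + 1;

	/* New form: expands to y = (x = x) + 1, leaving x untouched. */
	y = UNREFERENCED_PARAMETER_NEW(x) + 1;

	return y;
}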
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 35bc917631ae..c468f492b876 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -32,6 +32,7 @@
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define TB_BORROWED_MAX 400
+#define DML_MAX_VSTARTUP_START 1023
// ---------------------------
// Declaration Begins
@@ -1736,7 +1737,7 @@ static void CalculateBytePerPixelAndBlockSizes(
#endif
} // CalculateBytePerPixelAndBlockSizes
-static dml_float_t CalculateTWait(
+static noinline_for_stack dml_float_t CalculateTWait(
dml_uint_t PrefetchMode,
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange,
dml_bool_t SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
@@ -4458,7 +4459,7 @@ static void CalculateSwathWidth(
}
} // CalculateSwathWidth
-static dml_float_t CalculateExtraLatency(
+static noinline_for_stack dml_float_t CalculateExtraLatency(
dml_uint_t RoundTripPingLatencyCycles,
dml_uint_t ReorderingBytes,
dml_float_t DCFCLK,
@@ -5915,7 +5916,7 @@ static dml_uint_t DSCDelayRequirement(
return DSCDelayRequirement_val;
}
-static dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
+static noinline_for_stack dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_bool_t NotUrgentLatencyHiding[],
dml_float_t ReadBandwidthLuma[],
@@ -6019,7 +6020,7 @@ static void CalculatePrefetchBandwithSupport(
#endif
}
-static dml_float_t CalculateBandwidthAvailableForImmediateFlip(
+static noinline_for_stack dml_float_t CalculateBandwidthAvailableForImmediateFlip(
dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_float_t ReadBandwidthLuma[],
@@ -6210,10 +6211,11 @@ static dml_uint_t CalculateMaxVStartup(
dml_print("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
dml_print("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
+ max_vstartup_lines = (dml_uint_t) dml_min(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
}
-static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
+static noinline_for_stack void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params,
dml_uint_t j,
dml_uint_t k)
@@ -6265,7 +6267,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
}
-static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
{
struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals;
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
@@ -6527,7 +6529,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.TotImmediateFlipBytes = 0;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) {
- mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k];
+ mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]);
if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) {
mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]);
} else {
@@ -10187,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para
result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx,
in_out_params->mode_lib,
in_out_params->in_display_cfg,
- 0,
+ in_out_params->in_start_state_idx,
in_out_params->mode_lib->states.num_states - 1);
if (result)
@@ -10203,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
}
+
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
{ \
dml_uint_t plane_idx; \
@@ -10331,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F
dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow);
dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte);
dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte);
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
index a38ed89c47a9..a38ed89c47a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
index dd3f43181a6e..5b40dcdc4406 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
@@ -38,6 +38,7 @@ enum dml_project_id {
dml_project_dcn35 = 3,
dml_project_dcn351 = 4,
dml_project_dcn401 = 5,
+ dml_project_dcn36 = 6,
};
enum dml_prefetch_modes {
dml_prefetch_support_uclk_fclk_and_stutter_if_possible = 0,
@@ -273,7 +274,6 @@ enum dml_clk_cfg_policy {
dml_use_state_freq = 2
};
-
struct soc_state_bounding_box_st {
dml_float_t socclk_mhz;
dml_float_t dscclk_mhz;
@@ -1893,7 +1893,7 @@ struct display_mode_lib_scratch_st {
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
};
-/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
+/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
struct display_mode_lib_st {
dml_uint_t project;
@@ -1916,6 +1916,7 @@ struct display_mode_lib_st {
struct dml_mode_support_ex_params_st {
struct display_mode_lib_st *mode_lib;
const struct dml_display_cfg_st *in_display_cfg;
+ dml_uint_t in_start_state_idx;
dml_uint_t out_lowest_state_idx;
struct dml_mode_support_info_st *out_evaluation_info;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
index 14d389525296..e574c81edf5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
@@ -52,7 +52,7 @@
#define __DML_VBA_DEBUG__
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
-#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
+#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
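The same parenthesisation concern motivates the __DML_ARB_TO_RET_DELAY__ change above: as a bare 7 + 95, the macro misbehaves whenever its expansion meets a higher-precedence operator. A quick arithmetic sketch (not part of the patch):

#define ARB_TO_RET_DELAY_OLD 7 + 95
#define ARB_TO_RET_DELAY_NEW (7 + 95)

/*
 * 2 * ARB_TO_RET_DELAY_OLD expands to 2 * 7 + 95 = 109 cycles (wrong),
 * while 2 * ARB_TO_RET_DELAY_NEW expands to 2 * (7 + 95) = 204 cycles.
 */
int twice_old = 2 * ARB_TO_RET_DELAY_OLD;	/* 109 */
int twice_new = 2 * ARB_TO_RET_DELAY_NEW;	/* 204 */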
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
index 89890c88fd66..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
index 113b0265e1d1..a82b49cf7fb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
@@ -30,7 +30,6 @@
#include "display_mode_core_structs.h"
#include "cmntypes.h"
-
#include "dml_assert.h"
#include "dml_logging.h"
@@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index efb099905496..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -2,341 +2,73 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml21_wrapper.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
-#include "bounding_boxes/dcn3_soc_bb.h"
+#include "soc_and_ip_translator.h"
-static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
+ const struct dc *in_dc,
+ const struct dml2_configuration_options *config)
{
- const struct dml2_soc_bb *soc_bb;
- const struct dml2_soc_qos_parameters *qos_params;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- soc_bb = &dml2_socbb_dcn31;
- qos_params = &dml_dcn31_soc_qos_params;
- break;
- case DCN_VERSION_4_01:
- default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
+ bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- qos_params = &dml_dcn4_variant_a_soc_qos_params;
- }
+ /* ODM options */
+ pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
+ pmo_options->disable_dyn_odm_for_multi_stream = true;
+ pmo_options->disable_dyn_odm_for_stream_with_svp = true;
- /* patch soc bb */
- memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
+ pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
- /* patch qos params */
- memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
-}
-
-static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb));
-}
+ /* NOTE: DRR and SubVP Require FAMS2 */
+ pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
+ in_dc->debug.force_disable_subvp ||
+ disable_fams2;
+ pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
+ disable_fams2;
+ pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
+ disable_fams2;
+ pmo_options->disable_fams2 = disable_fams2;
-static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities));
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
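
For reference, a minimal sketch (not part of the patch) of how the dml21_disable_pstate_method_mask bits map onto the PMO disables above; the mask value in the comment is purely an example:

	/* Bit positions inferred from the shifts above. */
	unsigned int mask = in_dc->debug.dml21_disable_pstate_method_mask;

	bool disable_vblank      = (mask >> 1) & 1; /* bit 1: VBLANK p-state method */
	bool disable_svp         = (mask >> 2) & 1; /* bit 2: SubVP (also off when FAMS2 is disabled) */
	bool disable_drr_clamped = (mask >> 3) & 1; /* bit 3: DRR clamped */
	bool disable_drr_var     = (mask >> 4) & 1; /* bit 4: DRR variable */
	/* e.g. mask == 0x6 (0b0110) disables the VBLANK and SubVP methods only. */
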
-static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- const struct dml2_ip_capabilities *ip_caps;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- ip_caps = &dml2_dcn31_max_ip_caps;
- break;
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
+ break;
default:
- ip_caps = &dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_invalid;
+ DC_ERR("unsupported dcn version for DML21!");
+ break;
}
- memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities));
+ return project_id;
}
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_socbb_params(dml_init, config, in_dc);
- else
- dml21_external_socbb_params(dml_init, config);
-}
-
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (config->use_native_soc_bb_construction)
- dml21_init_ip_params(dml_init, config, in_dc);
- else
- dml21_external_ip_params(dml_init, config);
-}
-
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config, const struct dc *in_dc)
-{
- int i;
-
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
-
- /* override clocks if smu is present */
- if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* do not override phyclks for now */
- /* phyclk */
- // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000;
- // }
-
- /* phyclk_d18 */
- // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000;
- // }
-
- /* phyclk_d32 */
- // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000;
- // }
- }
-
- dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
- dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- /* override bounding box paramters from VBIOS */
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
- }
-
- /* override bounding box paramters from DC config */
- if (in_dc->bb_overrides.sr_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
+ dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
+ dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- //TODO
- // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) {
- // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- // }
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
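
The get_soc_bb()/get_ip_caps() hooks are only visible here as call sites, so the vtable below is an inferred sketch of their shape, not a definition taken from the patch:

	/* Assumed layout of the per-ASIC translator vtable, inferred from the calls above. */
	struct soc_and_ip_translator_funcs {
		void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc,
				   const struct dml2_configuration_options *config);
		void (*get_ip_caps)(struct dml2_ip_capabilities *ip_caps);
	};
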
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -352,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ uint32_t pix_clk_100hz;
- timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
timing->h_front_porch = stream->timing.h_front_porch;
timing->v_front_porch = stream->timing.v_front_porch;
timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
timing->pixel_clock_khz *= 2;
- timing->h_total = stream->timing.h_total;
+ timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
timing->interlaced = stream->timing.flags.INTERLACE;
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
- timing->h_blank_end = hblank_start - stream->timing.h_addressable
+ timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding
- stream->timing.h_border_left - stream->timing.h_border_right;
if (hblank_start < stream->timing.h_addressable)
@@ -389,15 +125,16 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
- min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) {
+ pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ } else {
+ pix_clk_100hz = stream->timing.pix_clk_100hz;
+ }
+ min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL),
+ (timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
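
To make the minimum-refresh clamp above concrete, here is a worked example with hypothetical numbers (594 MHz pixel clock, h_total of 4400, and an assumed maximum hardware v_total of 65535):

	/* min refresh (uHz) = pix_clk_100hz * 100000000 / (h_total * max_v_total) */
	u64 pix_clk_100hz = 5940000;   /* 594 MHz in units of 100 Hz */
	u64 h_total       = 4400;
	u64 max_v_total   = 65535;     /* example hardware cap */
	u64 min_uhz = div64_u64(pix_clk_100hz * 100000000ULL, h_total * max_v_total);
	/* ~2059968 uHz, i.e. about 2.06 Hz; drr_config.min_refresh_uhz then becomes
	 * max(stream->timing.min_refresh_in_uhz, this hardware floor). */
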
@@ -445,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
-/**
- * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
- * based on the pipe context.
- * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
- * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
- *
- * This function modifies the horizontal active and blank end timings by adding and subtracting
- * the horizontal blanking borrow value from the pipe context, respectively.
- */
-static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
-{
- timing->h_active += pipe->hblank_borrow;
- timing->h_blank_end -= pipe->hblank_borrow;
-}
-
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -529,7 +251,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp
static void populate_dml21_stream_overrides_from_stream_state(
struct dml2_stream_parameters *stream_desc,
- struct dc_stream_state *stream)
+ struct dc_stream_state *stream,
+ struct dc_stream_status *stream_status)
{
switch (stream->debug.force_odm_combine_segments) {
case 0:
@@ -554,7 +277,9 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp ||
+ stream->hw_cursor_req ||
+ stream_status->mall_stream_config.cursor_size_limit_subvp;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -721,11 +446,20 @@ static void populate_dml21_surface_config_from_plane_state(
surface->dcc.informative.fraction_of_zero_size_request_plane1 = plane_state->dcc.independent_64b_blks_c;
surface->dcc.plane0.pitch = plane_state->dcc.meta_pitch;
surface->dcc.plane1.pitch = plane_state->dcc.meta_pitch_c;
- if (in_dc->ctx->dce_version < DCN_VERSION_4_01) {
- /* needed for N-1 testing */
+
+ // Update swizzle / array mode based on the gfx_format
+ switch (plane_state->tiling_info.gfxversion) {
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ break;
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
surface->tiling = gfx9_to_dml2_swizzle_mode(plane_state->tiling_info.gfx9.swizzle);
- } else {
+ break;
+ case DcGfxAddr3:
surface->tiling = gfx_addr3_to_dml2_swizzle_mode(plane_state->tiling_info.gfx_addr3.swizzle);
+ break;
}
}
@@ -747,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
- temp_pipe->hblank_borrow = pipe->hblank_borrow;
+ temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding;
+ temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding;
+ temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -778,6 +514,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;
@@ -879,6 +616,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ default:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
@@ -903,7 +641,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
+static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -911,10 +649,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- if (context->stream_status[i].plane_states[j] == plane) {
- *plane_id = (i << 16) | j;
- return true;
+ if (context->streams[i]->stream_id == stream_id) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ if (context->stream_status[i].plane_states[j] == plane) {
+ *plane_id = (i << 16) | j;
+ return true;
+ }
}
}
}
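
The plane_id packing used by dml21_wrapper_get_plane_id() above is simply (stream table index << 16) | plane index, so it can be unpacked the same way; the variable names here are illustrative:

	unsigned int plane_id     = (stream_table_idx << 16) | plane_table_idx;
	unsigned int stream_index = plane_id >> 16;     /* index into context->streams[] */
	unsigned int plane_index  = plane_id & 0xFFFF;  /* index into stream_status[i].plane_states[] */
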
@@ -937,14 +677,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
- if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
+ if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1010,11 +750,10 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
- populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
- adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
- populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+ populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed;
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
@@ -1030,18 +769,18 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
- disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
+ disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
- if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
@@ -1079,28 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
-}
-
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
- if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
- /* invalid register set index */
- return;
- }
-
- /* convert to legacy format (time in ns) */
- watermark->urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->pte_meta_urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_enter_plus_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_enter / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_exit / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].uclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->urgent_latency_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.fclk_pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].fclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->frac_urg_bw_flip = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_flip;
- watermark->frac_urg_bw_nom = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_nom;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -1146,53 +865,6 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
}
}
-
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming)
-{
- unsigned int hactive, vactive, hblank_start, vblank_start, hblank_end, vblank_end;
- struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
-
- hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow;
- vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
- hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
- vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
-
- hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow;
- vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
-
- if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- /* phantom has its own global sync */
- global_sync = &stream_programming->phantom_stream.global_sync;
- }
-
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
- pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
-
- pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
-
- pipe_ctx->pipe_dlg_param.hactive = hactive;
- pipe_ctx->pipe_dlg_param.vactive = vactive;
- pipe_ctx->pipe_dlg_param.htotal = pipe_ctx->stream->timing.h_total;
- pipe_ctx->pipe_dlg_param.vtotal = pipe_ctx->stream->timing.v_total;
- pipe_ctx->pipe_dlg_param.hblank_end = hblank_end;
- pipe_ctx->pipe_dlg_param.vblank_end = vblank_end;
- pipe_ctx->pipe_dlg_param.hblank_start = hblank_start;
- pipe_ctx->pipe_dlg_param.vblank_start = vblank_start;
- pipe_ctx->pipe_dlg_param.vfront_porch = pipe_ctx->stream->timing.v_front_porch;
- pipe_ctx->pipe_dlg_param.pixel_rate_mhz = pipe_ctx->stream->timing.pix_clk_100hz / 10000.00;
- pipe_ctx->pipe_dlg_param.refresh_rate = ((timing->pix_clk_100hz * 100) / timing->h_total) / timing->v_total;
- pipe_ctx->pipe_dlg_param.vtotal_max = pipe_ctx->stream->adjust.v_total_max;
- pipe_ctx->pipe_dlg_param.vtotal_min = pipe_ctx->stream->adjust.v_total_min;
- pipe_ctx->pipe_dlg_param.recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.recout_width = pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->pipe_dlg_param.full_recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.full_recout_width = pipe_ctx->plane_res.scl_data.recout.width;
-}
-
void dml21_map_hw_resources(struct dml2_context *dml_ctx)
{
unsigned int i = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 476a7f6e4875..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
@@ -11,20 +11,18 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
+struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming);
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index cb966f8d3216..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
@@ -142,108 +142,21 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
return num_pipes;
}
-
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out)
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming)
{
- memset(&out->rq_regs, 0, sizeof(out->rq_regs));
- out->rq_regs.rq_regs_l.chunk_size = rq_regs->rq_regs_l.chunk_size;
- out->rq_regs.rq_regs_l.min_chunk_size = rq_regs->rq_regs_l.min_chunk_size;
- //out->rq_regs.rq_regs_l.meta_chunk_size = rq_regs->rq_regs_l.meta_chunk_size;
- //out->rq_regs.rq_regs_l.min_meta_chunk_size = rq_regs->rq_regs_l.min_meta_chunk_size;
- out->rq_regs.rq_regs_l.dpte_group_size = rq_regs->rq_regs_l.dpte_group_size;
- out->rq_regs.rq_regs_l.mpte_group_size = rq_regs->rq_regs_l.mpte_group_size;
- out->rq_regs.rq_regs_l.swath_height = rq_regs->rq_regs_l.swath_height;
- out->rq_regs.rq_regs_l.pte_row_height_linear = rq_regs->rq_regs_l.pte_row_height_linear;
-
- out->rq_regs.rq_regs_c.chunk_size = rq_regs->rq_regs_c.chunk_size;
- out->rq_regs.rq_regs_c.min_chunk_size = rq_regs->rq_regs_c.min_chunk_size;
- //out->rq_regs.rq_regs_c.meta_chunk_size = rq_regs->rq_regs_c.meta_chunk_size;
- //out->rq_regs.rq_regs_c.min_meta_chunk_size = rq_regs->rq_regs_c.min_meta_chunk_size;
- out->rq_regs.rq_regs_c.dpte_group_size = rq_regs->rq_regs_c.dpte_group_size;
- out->rq_regs.rq_regs_c.mpte_group_size = rq_regs->rq_regs_c.mpte_group_size;
- out->rq_regs.rq_regs_c.swath_height = rq_regs->rq_regs_c.swath_height;
- out->rq_regs.rq_regs_c.pte_row_height_linear = rq_regs->rq_regs_c.pte_row_height_linear;
-
- out->rq_regs.drq_expansion_mode = rq_regs->drq_expansion_mode;
- out->rq_regs.prq_expansion_mode = rq_regs->prq_expansion_mode;
- //out->rq_regs.mrq_expansion_mode = rq_regs->mrq_expansion_mode;
- out->rq_regs.crq_expansion_mode = rq_regs->crq_expansion_mode;
- out->rq_regs.plane1_base_address = rq_regs->plane1_base_address;
- out->unbounded_req = rq_regs->unbounded_request_enabled;
-
- memset(&out->dlg_regs, 0, sizeof(out->dlg_regs));
- out->dlg_regs.refcyc_h_blank_end = disp_dlg_regs->refcyc_h_blank_end;
- out->dlg_regs.dlg_vblank_end = disp_dlg_regs->dlg_vblank_end;
- out->dlg_regs.min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- out->dlg_regs.refcyc_per_htotal = disp_dlg_regs->refcyc_per_htotal;
- out->dlg_regs.refcyc_x_after_scaler = disp_dlg_regs->refcyc_x_after_scaler;
- out->dlg_regs.dst_y_after_scaler = disp_dlg_regs->dst_y_after_scaler;
- out->dlg_regs.dst_y_prefetch = disp_dlg_regs->dst_y_prefetch;
- out->dlg_regs.dst_y_per_vm_vblank = disp_dlg_regs->dst_y_per_vm_vblank;
- out->dlg_regs.dst_y_per_row_vblank = disp_dlg_regs->dst_y_per_row_vblank;
- out->dlg_regs.dst_y_per_vm_flip = disp_dlg_regs->dst_y_per_vm_flip;
- out->dlg_regs.dst_y_per_row_flip = disp_dlg_regs->dst_y_per_row_flip;
- out->dlg_regs.ref_freq_to_pix_freq = disp_dlg_regs->ref_freq_to_pix_freq;
- out->dlg_regs.vratio_prefetch = disp_dlg_regs->vratio_prefetch;
- out->dlg_regs.vratio_prefetch_c = disp_dlg_regs->vratio_prefetch_c;
- out->dlg_regs.refcyc_per_tdlut_group = disp_dlg_regs->refcyc_per_tdlut_group;
- out->dlg_regs.refcyc_per_pte_group_vblank_l = disp_dlg_regs->refcyc_per_pte_group_vblank_l;
- out->dlg_regs.refcyc_per_pte_group_vblank_c = disp_dlg_regs->refcyc_per_pte_group_vblank_c;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_l = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_c;
- out->dlg_regs.refcyc_per_pte_group_flip_l = disp_dlg_regs->refcyc_per_pte_group_flip_l;
- out->dlg_regs.refcyc_per_pte_group_flip_c = disp_dlg_regs->refcyc_per_pte_group_flip_c;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_l = disp_dlg_regs->refcyc_per_meta_chunk_flip_l;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_c = disp_dlg_regs->refcyc_per_meta_chunk_flip_c;
- out->dlg_regs.dst_y_per_pte_row_nom_l = disp_dlg_regs->dst_y_per_pte_row_nom_l;
- out->dlg_regs.dst_y_per_pte_row_nom_c = disp_dlg_regs->dst_y_per_pte_row_nom_c;
- out->dlg_regs.refcyc_per_pte_group_nom_l = disp_dlg_regs->refcyc_per_pte_group_nom_l;
- out->dlg_regs.refcyc_per_pte_group_nom_c = disp_dlg_regs->refcyc_per_pte_group_nom_c;
- //out->dlg_regs.dst_y_per_meta_row_nom_l = disp_dlg_regs->dst_y_per_meta_row_nom_l;
- //out->dlg_regs.dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_c;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_l = disp_dlg_regs->refcyc_per_meta_chunk_nom_l;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_c = disp_dlg_regs->refcyc_per_meta_chunk_nom_c;
- out->dlg_regs.refcyc_per_line_delivery_pre_l = disp_dlg_regs->refcyc_per_line_delivery_pre_l;
- out->dlg_regs.refcyc_per_line_delivery_pre_c = disp_dlg_regs->refcyc_per_line_delivery_pre_c;
- out->dlg_regs.refcyc_per_line_delivery_l = disp_dlg_regs->refcyc_per_line_delivery_l;
- out->dlg_regs.refcyc_per_line_delivery_c = disp_dlg_regs->refcyc_per_line_delivery_c;
- out->dlg_regs.refcyc_per_vm_group_vblank = disp_dlg_regs->refcyc_per_vm_group_vblank;
- out->dlg_regs.refcyc_per_vm_group_flip = disp_dlg_regs->refcyc_per_vm_group_flip;
- out->dlg_regs.refcyc_per_vm_req_vblank = disp_dlg_regs->refcyc_per_vm_req_vblank;
- out->dlg_regs.refcyc_per_vm_req_flip = disp_dlg_regs->refcyc_per_vm_req_flip;
- out->dlg_regs.dst_y_offset_cur0 = disp_dlg_regs->dst_y_offset_cur0;
- out->dlg_regs.chunk_hdl_adjust_cur0 = disp_dlg_regs->chunk_hdl_adjust_cur0;
- //out->dlg_regs.dst_y_offset_cur1 = disp_dlg_regs->dst_y_offset_cur1;
- //out->dlg_regs.chunk_hdl_adjust_cur1 = disp_dlg_regs->chunk_hdl_adjust_cur1;
- out->dlg_regs.vready_after_vcount0 = disp_dlg_regs->vready_after_vcount0;
- out->dlg_regs.dst_y_delta_drq_limit = disp_dlg_regs->dst_y_delta_drq_limit;
- out->dlg_regs.refcyc_per_vm_dmdata = disp_dlg_regs->refcyc_per_vm_dmdata;
- out->dlg_regs.dmdata_dl_delta = disp_dlg_regs->dmdata_dl_delta;
-
- memset(&out->ttu_regs, 0, sizeof(out->ttu_regs));
- out->ttu_regs.qos_level_low_wm = disp_ttu_regs->qos_level_low_wm;
- out->ttu_regs.qos_level_high_wm = disp_ttu_regs->qos_level_high_wm;
- out->ttu_regs.min_ttu_vblank = disp_ttu_regs->min_ttu_vblank;
- out->ttu_regs.qos_level_flip = disp_ttu_regs->qos_level_flip;
- out->ttu_regs.refcyc_per_req_delivery_l = disp_ttu_regs->refcyc_per_req_delivery_l;
- out->ttu_regs.refcyc_per_req_delivery_c = disp_ttu_regs->refcyc_per_req_delivery_c;
- out->ttu_regs.refcyc_per_req_delivery_cur0 = disp_ttu_regs->refcyc_per_req_delivery_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_cur1 = disp_ttu_regs->refcyc_per_req_delivery_cur1;
- out->ttu_regs.refcyc_per_req_delivery_pre_l = disp_ttu_regs->refcyc_per_req_delivery_pre_l;
- out->ttu_regs.refcyc_per_req_delivery_pre_c = disp_ttu_regs->refcyc_per_req_delivery_pre_c;
- out->ttu_regs.refcyc_per_req_delivery_pre_cur0 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_pre_cur1 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur1;
- out->ttu_regs.qos_level_fixed_l = disp_ttu_regs->qos_level_fixed_l;
- out->ttu_regs.qos_level_fixed_c = disp_ttu_regs->qos_level_fixed_c;
- out->ttu_regs.qos_level_fixed_cur0 = disp_ttu_regs->qos_level_fixed_cur0;
- //out->ttu_regs.qos_level_fixed_cur1 = disp_ttu_regs->qos_level_fixed_cur1;
- out->ttu_regs.qos_ramp_disable_l = disp_ttu_regs->qos_ramp_disable_l;
- out->ttu_regs.qos_ramp_disable_c = disp_ttu_regs->qos_ramp_disable_c;
- out->ttu_regs.qos_ramp_disable_cur0 = disp_ttu_regs->qos_ramp_disable_cur0;
- //out->ttu_regs.qos_ramp_disable_cur1 = disp_ttu_regs->qos_ramp_disable_cur1;
+ union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
+
+ if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ /* phantom has its own global sync */
+ global_sync = &stream_programming->phantom_stream.global_sync;
+ }
+
+ memcpy(&pipe_ctx->global_sync,
+ global_sync,
+ sizeof(union dml2_global_sync_programming));
}
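
With the per-field dlg-param unpacking removed, downstream programming presumably reads the raw union copied into pipe_ctx->global_sync directly; a sketch using the dcn4x field names that appear in the deleted helper above (the consumer function itself is hypothetical):

	static void example_read_global_sync(struct pipe_ctx *pipe_ctx)
	{
		unsigned int vstartup = pipe_ctx->global_sync.dcn4x.vstartup_lines;
		unsigned int vready   = pipe_ctx->global_sync.dcn4x.vready_offset_pixels;
		unsigned int keepout  = pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines;

		/* handed to OTG/HUBP programming in place of the old pipe_dlg_param fields */
		(void)vstartup; (void)vready; (void)keepout;
	}
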
void dml21_populate_mall_allocation_size(struct dc_state *context,
@@ -301,28 +214,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
{
unsigned int pipe_reg_index = 0;
- dml21_populate_pipe_ctx_dlg_params(dml_ctx, context, pipe_ctx, stream_prog);
+ dml21_pipe_populate_global_sync(dml_ctx, context, pipe_ctx, stream_prog);
find_pipe_regs_idx(dml_ctx, pipe_ctx, &pipe_reg_index);
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
memcpy(&pipe_ctx->hubp_regs, pln_prog->phantom_plane.pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = false;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = 0;
} else {
memcpy(&pipe_ctx->hubp_regs, pln_prog->pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = pln_prog->pipe_regs[pipe_reg_index]->rq_regs.unbounded_request_enabled;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
@@ -331,7 +232,6 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;
dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
- memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context);
@@ -484,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* reset fams2 data */
memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -514,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc,
memcpy(static_base_state,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
sizeof(union dmub_cmd_fams2_config));
- memcpy(static_sub_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
- sizeof(union dmub_cmd_fams2_config));
+
+ if (dc->debug.fams_version.major == 3) {
+ memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2,
+ sizeof(union dmub_fams2_stream_static_sub_state_v2));
+ } else {
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+ }
switch (dc->debug.fams_version.minor) {
case 1:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
index d5153fbac921..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
@@ -18,10 +18,10 @@ struct dml2_display_ttu_regs;
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out);
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming);
void dml21_populate_mall_allocation_size(struct dc_state *context,
struct dml2_context *in_ctx,
struct dml2_per_plane_programming *pln_prog,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index bbc28b9a15a3..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
@@ -11,13 +10,15 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#define INVALID -1
+
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml_ctx = vzalloc(sizeof(struct dml2_context));
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+ (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -27,22 +28,18 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+ (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
return true;
}
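
The kzalloc()-to-vzalloc() switch matters because these DML structures are large; vzalloc() returns virtually contiguous memory and avoids high-order page allocations that can fail under memory pressure. Allocations made this way must be released with vfree(), as dml21_destroy() below does; a minimal pairing sketch:

	/* vzalloc()-backed memory must be freed with vfree(), never kfree(). */
	struct dml2_display_cfg_programming *prog = vzalloc(sizeof(*prog));
	if (!prog)
		return false;
	/* ... use prog ... */
	vfree(prog);   /* vfree(NULL) is also a safe no-op */
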
-static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
{
- bool disable_fams2;
- struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;
-
- /* ODM options */
- pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
- pmo_options->disable_dyn_odm_for_multi_stream = true;
- pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+ dml_ctx->config = *config;
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
@@ -52,51 +49,22 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
-
- pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
-
- /* NOTE: DRR and SubVP Require FAMS2 */
- disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
- in_dc->debug.force_disable_subvp ||
- disable_fams2;
- pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
- disable_fams2;
- pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
- disable_fams2;
- pmo_options->disable_fams2 = disable_fams2;
-
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
- in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
- pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- break;
- default:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
- }
- (*dml_ctx)->architecture = dml2_architecture_21;
+ dml_ctx->architecture = dml2_architecture_21;
+
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
- /* Store configuration options */
- (*dml_ctx)->config = *config;
+ DC_FP_START();
- /*Initialize SOCBB and DCNIP params */
- dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
- /* apply debug overrides */
- dml21_apply_debug_options(in_dc, *dml_ctx, config);
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
- /*Initialize DML21 instance */
- dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+ DC_FP_END();
}
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -105,15 +73,15 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
if (!dml21_allocate_memory(dml_ctx))
return false;
- dml21_init(in_dc, dml_ctx, config);
+ dml21_init(in_dc, *dml_ctx, config);
return true;
}
void dml21_destroy(struct dml2_context *dml2)
{
- kfree(dml2->v21.dml_init.dml2_instance);
- kfree(dml2->v21.mode_programming.programming);
+ vfree(dml2->v21.dml_init.dml2_instance);
+ vfree(dml2->v21.mode_programming.programming);
}
static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
@@ -125,6 +93,7 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
int num_pipes;
+ unsigned int dml_phantom_prog_idx;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
@@ -138,6 +107,9 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+	/* phantom planes start after the main planes */
+ dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes;
+
for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
@@ -163,6 +135,16 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
}
}
+
+ /* copy per plane mcache allocation */
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+ if (pln_prog->phantom_plane.valid) {
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx],
+ &pln_prog->phantom_plane.mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+
+ dml_phantom_prog_idx++;
+ }
}
/* assign global clocks */
@@ -190,10 +172,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -216,7 +228,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+ DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+ DC_FP_END();
if (!result)
return false;
@@ -226,6 +240,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+			// Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+			// Populate mcache regs for each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */
@@ -233,13 +255,6 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
dml21_copy_clocks_to_dc_state(dml_ctx, context);
dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
- if (in_dc->ctx->dce_version == DCN_VERSION_3_2) {
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.a, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.b, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.c, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.d, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- }
-
dml21_build_fams2_programming(in_dc, context, dml_ctx);
}
@@ -266,22 +281,26 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+ DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+ DC_FP_END();
if (!is_supported)
return false;
return true;
}
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate) {
+ /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
- } else
+ else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
return out;
}
@@ -420,8 +439,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
+ DC_FP_START();
+
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
@@ -436,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
dml21_init(in_dc, dml_ctx, config);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
new file mode 100644
index 000000000000..15f92029d2e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+
+#ifndef _DML21_WRAPPER_H_
+#define _DML21_WRAPPER_H_
+
+#include "os_types.h"
+#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
+
+struct dc;
+struct dc_state;
+struct dml2_configuration_options;
+struct dml2_context;
+enum dc_validate_mode;
+
+/**
+ * dml21_create - Creates dml21_context.
+ * @in_dc: dc.
+ * @dml_ctx: Created dml21 context.
+ * @config: dml21 configuration options.
+ *
+ * Creation of DML21 is done as part of dc_state creation.
+ * DML21 IP, SOC and STATES are initialized at
+ * creation time.
+ *
+ * Return: True if dml2 is successfully created, false otherwise.
+ */
+bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
+void dml21_destroy(struct dml2_context *dml2);
+void dml21_copy(struct dml2_context *dst_dml_ctx,
+ struct dml2_context *src_dml_ctx);
+bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
+ struct dml2_context *src_dml_ctx);
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
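+
+/*
+ * Minimal lifecycle sketch (editorial illustration, not part of this patch;
+ * names such as "dc" and "config" stand in for the caller's own objects):
+ *
+ *	struct dml2_context *dml_ctx;
+ *
+ *	if (dml21_create(dc, &dml_ctx, &config)) {
+ *		... validate / program with dml_ctx ...
+ *		dml21_destroy(dml_ctx);
+ *	}
+ *
+ * dml21_destroy() only releases the internal allocations made at create time;
+ * dml21_reinit() simply re-runs initialization on an existing context.
+ */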
+
+/**
+ * dml21_validate - Determines if a display configuration is supported or not.
+ * @in_dc: dc.
+ * @context: dc_state to be validated.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
+ *
+ * Based on validate_mode, this internally calls:
+ *
+ * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
+ * Calculates if dc_state can be supported on the input display
+ * configuration. If supported, generates the necessary HW
+ * programming for the new dc_state.
+ *
+ * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option
+ * Calculates if dc_state can be supported for the input display
+ * config.
+ *
+ * Context: Two threads may not invoke this function concurrently unless they reference
+ * separate dc_states for validation.
+ * Return: True if mode is supported, false otherwise.
+ */
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
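+
+/*
+ * Usage sketch (editorial illustration; the surrounding caller is hypothetical):
+ *
+ *	supported = dml21_validate(dc, context, dml_ctx,
+ *				   need_programming ?
+ *				   DC_VALIDATE_MODE_AND_PROGRAMMING :
+ *				   DC_VALIDATE_MODE_ONLY);
+ *
+ * Only DC_VALIDATE_MODE_AND_PROGRAMMING takes the mode_check_and_programming
+ * path and fills in the HW programming for the new dc_state; the other two
+ * modes stop after the support check.
+ */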
+
+/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
+void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
+
+/* Structure for inputting external SOCBB and DCNIP values for tool based debugging. */
+struct socbb_ip_params_external {
+ struct dml2_ip_capabilities ip_params;
+ struct dml2_soc_bb soc_bb;
+};
+
+/* mcache parameters decided by DML */
+struct dc_mcache_params {
+ bool valid;
+ /*
+ * For iMALL, dedicated mall mcaches are required (sharing of the last
+ * slice is possible); for legacy phantom or phantom without return,
+ * only the mall mcaches need to be valid.
+ */
+ bool requires_dedicated_mall_mcache;
+ unsigned int num_mcaches_plane0;
+ unsigned int num_mcaches_plane1;
+ /*
+ * Generally, plane0/1 slices must use a disjoint set of caches
+ * but in some cases the final segment of the two planes can
+ * use the same cache. If plane0_plane1 is set, then this is
+ * allowed.
+ *
+ * Similarly, the caches allocated to MALL prefetcher are generally
+ * disjoint, but if mall_prefetch is set, then the final segment
+ * between the main and the mall pixel requestor can use the same
+ * cache.
+ *
+ * Note that both bits may be set at the same time.
+ */
+ struct {
+ bool mall_comb_mcache_p0;
+ bool mall_comb_mcache_p1;
+ bool plane0_plane1;
+ } last_slice_sharing;
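+	/*
+	 * Worked example (editorial illustration): with num_mcaches_plane0 = 3
+	 * and num_mcaches_plane1 = 2, the plane normally needs 3 + 2 = 5
+	 * distinct mcaches; when last_slice_sharing.plane0_plane1 is set the
+	 * final slices of both planes share one mcache, so 3 + 2 - 1 = 4
+	 * suffice.
+	 */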
+ /*
+ * A plane is divided into vertical slices of mcaches,
+ * which wrap on the surface width.
+ *
+ * For example, if the surface width is 7680, and split into
+ * three slices of equal width, the boundary array would contain
+ * [2560, 5120, 7680]
+ *
+ * The assignments are
+ * 0 = [0 .. 2559]
+ * 1 = [2560 .. 5119]
+ * 2 = [5120 .. 7679]
+ * 0 = [7680 .. INF]
+ * The final element is implicitly the same as the first, and
+ * at first seems invalid since it is never referenced (it is
+ * outside the surface). However, it is useful when shifting
+ * (see below).
+ *
+ * For any given valid mcache assignment, a shifted version, wrapped
+ * on the surface width boundary is also assumed to be valid.
+ *
+ * For example, shifting [2560, 5120, 7680] by -50 results in
+ * [2510, 5170, 7630].
+ *
+ * The assignments are now:
+ * 0 = [0 .. 2509]
+ * 1 = [2510 .. 5169]
+ * 2 = [5170 .. 7629]
+ * 0 = [7630 .. INF]
+ */
+ int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
+ int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
+};
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 793e1c038efd..16a4f97bca4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
index 281d7ad230d8..281d7ad230d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
index a64ec4dcf11a..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
index 25b607e7b726..bf57df42d1d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
@@ -121,6 +121,8 @@ struct dml2_display_rq_regs {
uint32_t crq_expansion_mode;
uint32_t plane1_base_address;
uint32_t unbounded_request_enabled;
+ bool pte_buffer_mode;
+ bool force_one_row_for_frame;
// MRQ
uint32_t mrq_expansion_mode;
@@ -156,6 +158,10 @@ struct dml2_dchub_watermark_regs {
uint32_t urgent;
uint32_t sr_enter;
uint32_t sr_exit;
+ uint32_t sr_enter_z8;
+ uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index 5e1ab6d97640..35aa954248cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -49,6 +49,11 @@ enum dml2_source_format_class {
dml2_422_packed_12 = 18
};
+enum dml2_sample_positioning {
+ dml2_interstitial = 0,
+ dml2_cosited = 1
+};
+
enum dml2_rotation_angle {
dml2_rotation_0 = 0,
dml2_rotation_90 = 1,
@@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate {
dml2_dp_rate_uhbr20 = 6
};
+enum dml2_pstate_type {
+ dml2_pstate_type_uclk = 0,
+ dml2_pstate_type_fclk = 1,
+ dml2_pstate_type_ppt = 2,
+ dml2_pstate_type_temp_read = 3,
+ dml2_pstate_type_dummy_pstate = 4,
+ dml2_pstate_type_count = 5
+};
+
enum dml2_uclk_pstate_change_strategy {
dml2_uclk_pstate_change_strategy_auto = 0,
dml2_uclk_pstate_change_strategy_force_vactive = 1,
@@ -166,7 +180,7 @@ struct dml2_surface_cfg {
enum dml2_swizzle_mode tiling;
struct {
- unsigned long pitch;
+ unsigned long pitch; // In elements, two pixels per element in 422 packed format
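+		// (illustration: a 3840 pixel wide 422 packed surface thus needs a pitch of at least 1920 elements)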
unsigned long width;
unsigned long height;
} plane0;
@@ -222,6 +236,11 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool easf_enabled;
+ bool isharp_enabled;
+ bool upsp_enabled;
+ enum dml2_sample_positioning upsp_sample_positioning;
+ unsigned int upsp_vtaps;
struct {
double h_ratio;
double v_ratio;
@@ -383,8 +402,9 @@ struct dml2_plane_parameters {
// reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait
// The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override.
long reserved_vblank_time_ns;
- unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
+ unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
+ unsigned int hostvm_min_page_size_kbytes;
enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config
@@ -411,7 +431,6 @@ struct dml2_stream_parameters {
bool disable_dynamic_odm;
bool disable_subvp;
int minimum_vblank_idle_requirement_us;
- bool minimize_active_latency_hiding;
struct {
struct {
@@ -425,6 +444,7 @@ struct dml2_stream_parameters {
struct dml2_display_cfg {
bool gpuvm_enable;
+ bool ffbm_enable;
bool hostvm_enable;
// Allocate DET proportionally between streams based on pixel rate
@@ -453,6 +473,7 @@ struct dml2_display_cfg {
bool enable;
bool value;
} force_nom_det_size_kbytes;
+
bool mode_support_check_disable;
bool mcache_admissibility_check_disable;
bool surface_viewport_size_check_disable;
@@ -475,7 +496,6 @@ struct dml2_display_cfg {
bool synchronize_ddr_displays_for_uclk_pstate_change;
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
- unsigned int best_effort_min_active_latency_hiding_us;
bool all_streams_blanked;
} overrides;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
index 8f624a912e78..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
index 5f0bc42d1d2f..1fbc520c2540 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
@@ -89,16 +89,21 @@ struct dml2_soc_qos_parameters {
struct dml2_soc_power_management_parameters {
double dram_clk_change_blackout_us;
- double dram_clk_change_read_only_us;
- double dram_clk_change_write_only_us;
+ double dram_clk_change_read_only_us; // deprecated
+ double dram_clk_change_write_only_us; // deprecated
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
+ double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
@@ -130,6 +135,7 @@ struct dml2_soc_state_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
@@ -138,6 +144,9 @@ struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
+ double fraction_of_urgent_bandwidth_nominal_target;
+ double fraction_of_urgent_bandwidth_flip_target;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
@@ -163,6 +172,7 @@ struct dml2_soc_bb {
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
+ unsigned int TDLUT_33cube_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
@@ -181,7 +191,9 @@ struct dml2_ip_capabilities {
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
-
+ unsigned int ppt_max_allow_delay_us;
+ unsigned int temp_read_max_allow_delay_us;
+ unsigned int dummy_pstate_max_allow_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index d2d053f2354d..452e4a2e72c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -14,16 +14,11 @@
struct dml2_instance;
-enum dml2_status {
- dml2_success = 0,
- dml2_error_generic = 1
-};
-
enum dml2_project_id {
dml2_project_invalid = 0,
- dml2_project_dcn4x_stage1 = 1,
- dml2_project_dcn4x_stage2 = 2,
- dml2_project_dcn4x_stage2_auto_drr_svp = 3,
+ dml2_project_dcn4x_stage1,
+ dml2_project_dcn4x_stage2,
+ dml2_project_dcn4x_stage2_auto_drr_svp,
};
enum dml2_pstate_change_support {
@@ -58,7 +53,9 @@ enum dml2_output_type_and_rate__rate {
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
- dml2_output_rate_hdmi_rate_12x4 = 12
+ dml2_output_rate_hdmi_rate_12x4 = 12,
+ dml2_output_rate_hdmi_rate_16x4 = 13,
+ dml2_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_pmo_options {
@@ -73,6 +70,8 @@ struct dml2_pmo_options {
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
+ struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES];
+ unsigned int num_override_strategies_per_list[DML2_MAX_PLANES];
};
struct dml2_options {
@@ -245,6 +244,7 @@ struct dml2_per_plane_programming {
struct {
bool valid;
struct dml2_plane_parameters descriptor;
+ struct dml2_mcache_surface_allocation mcache_allocation;
struct dml2_dchub_per_pipe_register_set *pipe_regs[DML2_MAX_PLANES];
} phantom_plane;
};
@@ -283,7 +283,10 @@ struct dml2_per_stream_programming {
} phantom_stream;
union dmub_cmd_fams2_config fams2_base_params;
- union dmub_cmd_fams2_config fams2_sub_params;
+ union {
+ union dmub_cmd_fams2_config fams2_sub_params;
+ union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2;
+ };
};
//-----------------
@@ -309,6 +312,7 @@ struct dml2_mode_support_info {
bool NumberOfOTGSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
+ bool NumberOfTDLUT33cubeSupport;
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
@@ -356,6 +360,8 @@ struct dml2_mode_support_info {
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
bool temp_read_or_ppt_support;
+ bool qos_bandwidth_support;
+ bool dcfclk_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -416,6 +422,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
@@ -458,6 +466,10 @@ struct dml2_display_cfg_programming {
} plane_info[DML2_MAX_PLANES];
struct {
+ unsigned int total_num_dpps_required;
+ } dpp;
+
+ struct {
unsigned long long total_surface_size_in_mall_bytes;
unsigned int subviewport_lines_needed_in_mall[DML2_MAX_PLANES];
} mall;
@@ -664,6 +676,8 @@ struct dml2_display_cfg_programming {
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
double LowestPrefetchMargin;
+
+ unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES];
} misc;
struct dml2_mode_support_info mode_support_info;
@@ -674,9 +688,14 @@ struct dml2_display_cfg_programming {
// unlimited # of mcache
struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];
+ bool failed_prefetch;
+ bool failed_uclk_pstate;
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_mode_programming_dcfclk;
+ bool failed_mode_programming_prefetch;
+ bool failed_mode_programming_flip;
bool failed_map_watermarks;
} informative;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index d68b4567e218..eba948e187c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.writeback_interface_buffer_size_kbytes = 90,
//Number of pipes after DCN Pipe harvesting
.max_num_dpp = 4,
+ .max_num_opp = 4,
.max_num_otg = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
@@ -141,9 +142,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
} else {
- memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
+ memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
-
core->clean_me_up.mode_lib.ip.imall_supported = false;
}
@@ -254,7 +254,8 @@ static void expand_implict_subvp(const struct display_configuation_with_meta *di
static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_instance *core, const struct display_configuation_with_meta *display_cfg,
const struct dml2_display_cfg *svp_expanded_display_cfg, struct dml2_display_cfg_programming *programming, struct dml2_core_scratch *scratch)
{
- unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index;
+ unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index, mcache_index;
+ unsigned int total_main_mcaches_required = 0;
int total_pipe_regs_copied = 0;
int dml_internal_pipe_index = 0;
const struct dml2_plane_parameters *main_plane;
@@ -325,6 +326,13 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&programming->plane_programming[plane_index].mcache_allocation,
+ &display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+ total_main_mcaches_required += programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane0 +
+ programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane1 -
+ (programming->plane_programming[plane_index].mcache_allocation.last_slice_sharing.plane0_plane1 ? 1 : 0);
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[plane_index].pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -363,6 +371,22 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
memcpy(&programming->plane_programming[main_plane_index].phantom_plane.descriptor, phantom_plane, sizeof(struct dml2_plane_parameters));
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[main_plane_index].svp_size_mall_bytes, dml_internal_pipe_index);
+
+	/* generate mcache allocation; phantoms use an identical mcache configuration, but in the MALL set and with unique mcache IDs beginning after all main IDs */
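+	/*
+	 * Worked example (editorial illustration, values made up): if the main
+	 * planes already occupy global mcache IDs 0..5, total_main_mcaches_required
+	 * is 6, so a phantom whose main-plane IDs were {0, 1} is renumbered to
+	 * {6, 7}, and the same IDs are mirrored into the global_mcache_ids_mall_*
+	 * fields.
+	 */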
+ memcpy(&programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation,
+ &programming->plane_programming[main_plane_index].mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane0; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane0[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index];
+ }
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane1; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane1[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index];
+ }
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[main_plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -433,10 +457,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
@@ -486,7 +510,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
- dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
+ DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
@@ -572,6 +596,10 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&in_out->programming->plane_programming[plane_index].mcache_allocation,
+ &in_out->display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+
for (pipe_offset = 0; pipe_offset < in_out->programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
in_out->programming->plane_programming[plane_index].plane_descriptor = &in_out->programming->display_config.plane_descriptors[plane_index];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index e62b2d3eeee6..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -9,7 +9,4 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out);
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out);
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out);
bool core_dcn4_calculate_mcache_allocation(struct dml2_calculate_mcache_allocation_in_out *in_out);
-
-bool core_dcn4_unit_test(void);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b9ec243cf9ba..a02e9fd6b5ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -15,6 +15,7 @@
//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
//#define DML_GLOBAL_PREFETCH_CHECK
#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
+#define DML_MAX_VSTARTUP_START 1023
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
{
@@ -53,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned
static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
@@ -178,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg
} else {
out_bpp[k] = 0;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
}
}
@@ -211,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru
num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
return num_active_pipes;
}
@@ -250,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg,
unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
+ DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
return is_phantom;
}
@@ -414,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
*nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
if (nomDETInKByteOverrideEnable) {
*nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
}
}
@@ -501,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -534,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
}
}
@@ -569,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x) {
version = 11;
} else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -600,8 +595,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
@@ -644,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes(
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
+ DML_ASSERT(0);
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
+ DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y);
+ DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
*surf_linear128_l = 0;
@@ -702,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes(
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
if (SurfaceTiling == dml2_gfx11_sw_linear) {
@@ -751,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes(
} else if (SurfaceTiling == dml2_sw_256kb_2d) {
macro_tile_scale = 32;
} else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
+ DML_ASSERT(0);
}
*MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
@@ -765,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
}
static void CalculateSinglePipeDPPCLKAndSCLThroughput(
@@ -859,10 +848,8 @@ static void CalculateSwathWidth(
unsigned int surface_width_ub_c;
unsigned int surface_height_ub_c;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -871,11 +858,9 @@ static void CalculateSwathWidth(
SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
MainSurfaceODMMode = ODMMode[k];
@@ -898,13 +883,11 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
SwathWidthC[k] = SwathWidthY[k] / 2;
@@ -933,22 +916,20 @@ static void CalculateSwathWidth(
surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
req_per_swath_ub_l[k] = 0;
req_per_swath_ub_c[k] = 0;
@@ -994,15 +975,12 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
}
}
@@ -1017,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi
if (unb_req_force_en) {
unb_req_en = unb_req_force_val && unb_req_ok;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
+ DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
+ DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
+ return unb_req_en;
}
static void CalculateDETBufferSize(
@@ -1053,16 +1029,14 @@ static void CalculateDETBufferSize(
bool NextPotentialSurfaceToAssignDETPieceFound;
bool MinimizeReallocationSuccess = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
// Note: Will use default det size if that fits 2 swaths
if (UnboundedRequestEnabled) {
@@ -1091,19 +1065,15 @@ static void CalculateDETBufferSize(
l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
if (l->minDET_pipe == 0) {
l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
}
if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
@@ -1116,12 +1086,10 @@ static void CalculateDETBufferSize(
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 1 : DPPPerSurface[k]) * l->minDET_pipe;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
}
if (display_cfg->minimize_det_reallocation) {
@@ -1193,14 +1161,12 @@ static void CalculateDETBufferSize(
l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
}
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
+ DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
@@ -1212,10 +1178,8 @@ static void CalculateDETBufferSize(
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
}
for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
@@ -1223,22 +1187,18 @@ static void CalculateDETBufferSize(
l->NextSurfaceToAssignDETPiece = 0;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) {
l->NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
@@ -1248,20 +1208,16 @@ static void CalculateDETBufferSize(
* (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte,
math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true;
@@ -1273,29 +1229,36 @@ static void CalculateDETBufferSize(
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
-#endif
}
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
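CalculateRequiredDispclk() now takes an isTMDS420 flag and clamps its result to at least half the pixel clock for TMDS YCbCr 4:2:0 outputs, so ODM combine can no longer pull DISPCLK below that floor. A worked example with an assumed 594 MHz pixel clock (values in MHz):

	/* Illustrative arithmetic only, PixelClock = 594.0, isTMDS420 = true:
	 *   no ODM combine:             max(594.0,       594.0 / 2.0) = 594.0
	 *   dml2_odm_mode_combine_2to1: max(594.0 / 2.0, 297.0)       = 297.0
	 *   dml2_odm_mode_combine_4to1: max(594.0 / 4.0, 297.0)       = 297.0  (floor applies)
	 */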
static double TruncToValidBPP(
@@ -1340,6 +1303,7 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 16;
} else {
+
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
@@ -1357,6 +1321,7 @@ static double TruncToValidBPP(
MaxDSCBPP = 16;
}
}
+
if (Output == dml2_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
} else if (DSCEnable && Output == dml2_dp) {
@@ -1509,15 +1474,13 @@ static unsigned int dscceComputeDelay(
//pixel delay is group_delay (converted to pixels) + pipeline, however, first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
+ DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc);
+ DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP);
+ DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
+ DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices);
+ DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels);
return pixels;
}
@@ -1592,10 +1555,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e
// sft
Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
+ DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay);
return Delay;
}
@@ -1666,10 +1627,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
}
meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
+ DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
if (p->GPUVMEnable == true) {
double meta_vmpg_bytes = 4.0 * 1024.0;
*p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
@@ -1723,25 +1682,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
+ DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
+ DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
+ DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
if (p->SurfaceTiling == dml2_sw_linear) {
*p->PixelPTEReqHeight = 1;
@@ -1777,22 +1734,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
__func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
+ DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
+ DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
*p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
@@ -1810,7 +1765,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_height_linear = 128;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
#endif
} else if (!dml_is_vertical_rotation(p->RotationAngle)) {
@@ -1824,7 +1779,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
#endif
*p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
@@ -1839,7 +1794,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
#endif
}
@@ -1851,18 +1806,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
#endif
return vm_bytes;
@@ -1893,12 +1848,12 @@ static unsigned int CalculatePrefetchSourceLines(
double numLines = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps);
+ DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
+ DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
@@ -1933,11 +1888,11 @@ static unsigned int CalculatePrefetchSourceLines(
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
+ DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
+ DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
+ DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return (unsigned int)(numLines);
@@ -2006,8 +1961,8 @@ static void CalculateMALLUseForStaticScreen(
if (is_using_mall_for_ss[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
@@ -2021,7 +1976,7 @@ static void CalculateMALLUseForStaticScreen(
(!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
}
}
if (CanAddAnotherSurfaceToMALL) {
@@ -2029,8 +1984,8 @@ static void CalculateMALLUseForStaticScreen(
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
@@ -2202,15 +2157,15 @@ static void CalculateDCCConfiguration(
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
+ DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
@@ -2300,12 +2255,12 @@ static void CalculateDCCConfiguration(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
#endif
}
@@ -2325,26 +2280,26 @@ static void calculate_mcache_row_bytes(
unsigned int mvmpg_per_mcache;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
+ DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans);
+ DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
+ DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
+ DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
+ DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
+ DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
+ DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
+ DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width);
+ DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
+#endif
+ DML_ASSERT(p->mcache_line_size_bytes != 0);
+ DML_ASSERT(p->mcache_size_bytes != 0);
*p->mvmpg_width = 0;
*p->mvmpg_height = 0;
@@ -2352,6 +2307,7 @@ static void calculate_mcache_row_bytes(
if (p->full_vp_height == 0 && p->full_vp_width == 0) {
*p->num_mcaches = 0;
*p->mcache_row_bytes = 0;
+ *p->mcache_row_bytes_per_channel = 0;
} else {
blk_bytes = dml_get_tile_block_size_bytes(p->tiling_mode);
@@ -2368,8 +2324,8 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_width = p->vmpg_width;
*p->mvmpg_height = p->vmpg_height;
} else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
+ DML_ASSERT(0);
}
}
@@ -2420,38 +2376,42 @@ static void calculate_mcache_row_bytes(
// If this mcache_row_bytes for the full viewport of the surface is less than or equal to mcache_bytes,
// then one mcache can be used for this request stream. If not, it is useful to know the width of the viewport that can be supported in the mcache_bytes.
- if (p->gpuvm_enable || !p->surf_vert) {
- *p->mcache_row_bytes = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
+ if (p->gpuvm_enable || p->surf_vert) {
+ *p->mcache_row_bytes_per_channel = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
+ *p->mcache_row_bytes = *p->mcache_row_bytes_per_channel * p->num_chans;
} else { // horizontal and gpuvm disable
*p->mcache_row_bytes = *p->meta_row_width_ub * p->blk_height * p->bytes_per_pixel / 256;
- *p->mcache_row_bytes = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
+ if (p->mcache_line_size_bytes != 0)
+ *p->mcache_row_bytes_per_channel = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
}
*p->dcc_dram_bw_pref_overhead_factor = 1 + math_max2(1.0 / 256.0, *p->mcache_row_bytes / p->full_swath_bytes); // dcc_dr_oh_pref
- *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->mcache_size_bytes, 1);
+ if (p->mcache_size_bytes != 0)
+ *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes_per_channel / p->mcache_size_bytes, 1);
mvmpg_per_mcache = p->mcache_size_bytes / meta_per_mvmpg_per_channel_ub;
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
+ DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
- DML2_ASSERT(*p->num_mcaches > 0);
+ DML_ASSERT(*p->num_mcaches > 0);
}
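calculate_mcache_row_bytes() now reports both a per-channel row size and the all-channel total, and the divides by mcache_line_size_bytes and mcache_size_bytes are guarded; the zero cases are the same malformed inputs the DML_ASSERTs at the top of the function already flag. With assumed numbers, the mcache count follows from the per-channel value:

	/* Illustrative arithmetic only: num_chans = 4, mcache_size_bytes = 2048,
	 * and a per-channel meta row of 5000 bytes give
	 *   mcache_row_bytes_per_channel = 5000
	 *   mcache_row_bytes             = 5000 * 4 = 20000
	 *   num_mcaches                  = ceil(5000.0 / 2048.0) = 3
	 */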
static void calculate_mcache_setting(
@@ -2465,11 +2425,13 @@ static void calculate_mcache_setting(
*p->num_mcaches_l = 0;
*p->mcache_row_bytes_l = 0;
+ *p->mcache_row_bytes_per_channel_l = 0;
*p->dcc_dram_bw_nom_overhead_factor_l = 1.0;
*p->dcc_dram_bw_pref_overhead_factor_l = 1.0;
*p->num_mcaches_c = 0;
*p->mcache_row_bytes_c = 0;
+ *p->mcache_row_bytes_per_channel_c = 0;
*p->dcc_dram_bw_nom_overhead_factor_c = 1.0;
*p->dcc_dram_bw_pref_overhead_factor_c = 1.0;
@@ -2505,6 +2467,7 @@ static void calculate_mcache_setting(
// output
l->l_p.num_mcaches = p->num_mcaches_l;
l->l_p.mcache_row_bytes = p->mcache_row_bytes_l;
+ l->l_p.mcache_row_bytes_per_channel = p->mcache_row_bytes_per_channel_l;
l->l_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_l;
l->l_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_l;
l->l_p.mvmpg_width = &l->mvmpg_width_l;
@@ -2514,7 +2477,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- dml2_assert(*p->num_mcaches_l > 0);
+ DML_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2540,6 +2503,7 @@ static void calculate_mcache_setting(
// output
l->c_p.num_mcaches = p->num_mcaches_c;
l->c_p.mcache_row_bytes = p->mcache_row_bytes_c;
+ l->c_p.mcache_row_bytes_per_channel = p->mcache_row_bytes_per_channel_c;
l->c_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_c;
l->c_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_c;
l->c_p.mvmpg_width = &l->mvmpg_width_c;
@@ -2549,12 +2513,12 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- dml2_assert(*p->num_mcaches_c > 0);
+ DML_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
- l->mcache_remainder_l = *p->mcache_row_bytes_l % p->mcache_size_bytes;
- l->mcache_remainder_c = *p->mcache_row_bytes_c % p->mcache_size_bytes;
+ l->mcache_remainder_l = *p->mcache_row_bytes_per_channel_l % p->mcache_size_bytes;
+ l->mcache_remainder_c = *p->mcache_row_bytes_per_channel_c % p->mcache_size_bytes;
l->mvmpg_access_width_l = p->surf_vert ? l->mvmpg_height_l : l->mvmpg_width_l;
l->mvmpg_access_width_c = p->surf_vert ? l->mvmpg_height_c : l->mvmpg_width_c;
@@ -2577,36 +2541,39 @@ static void calculate_mcache_setting(
if (l->is_dual_plane) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
- if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
- l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
- (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
+ /* if either remainder is 0, then mcache sharing is not needed or not possible due to full utilization */
+ if (l->mcache_remainder_l && l->mcache_remainder_c) {
+ if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
+ l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
+ (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
+ }
+ *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
}
- *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
}
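	/* Worked example of the guarded combine check above, with assumed numbers:
	 * mcache_size_bytes = 2048, mcache_remainder_l = 500, mcache_remainder_c = 300,
	 * luma_time_factor = 2.0 and no MALL mcache doubling on either plane:
	 *   lc_comb_last_mcache_size = 500 * 1 * 2.0 + 300 * 1 = 1300 <= 2048
	 * so the last luma and chroma mcaches can be combined; if either remainder
	 * were 0, the new outer check skips the combine evaluation entirely. */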
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
+ DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
+ DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
+ DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
+ DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
}
#endif
// calculate split_coordinate
@@ -2626,11 +2593,11 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
if (l->is_dual_plane) {
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
}
#endif
@@ -2647,10 +2614,10 @@ static void calculate_mcache_setting(
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
#endif
}
@@ -2681,8 +2648,8 @@ static void calculate_mall_bw_overhead_factor(
mall_prefetch_dram_overhead_factor[k] = 2.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
#endif
}
}
@@ -2759,26 +2726,24 @@ static double dml_get_return_bandwidth_available(
else // dml2_core_internal_bw_dram
return_bw_mbps = derate_dram_bandwidth;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
+ DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
+ DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
+ DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
return return_bw_mbps;
}
-static void calculate_bandwidth_available(
+static noinline_for_stack void calculate_bandwidth_available(
double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
@@ -2794,9 +2759,9 @@ static void calculate_bandwidth_available(
{
unsigned int n, m;
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
// Calculate all the bandwidth available
for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
@@ -2815,8 +2780,8 @@ static void calculate_bandwidth_available(
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
#endif
// urg_bandwidth_available_vm_only is indexed by soc_state
@@ -2830,9 +2795,9 @@ static void calculate_bandwidth_available(
urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
#endif
}
}
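
For context, the derate-then-take-the-minimum pattern in the two hunks above can be shown with a small standalone sketch. Everything below is illustrative: the clock, bus width, DRAM bandwidth and derate factors are invented numbers, and min2() simply stands in for math_min2(); this is not the driver's SoC model.

/* Standalone sketch of the "urgent bandwidth = min of derated SDP and DRAM" idea above.
 * All names and values are illustrative, not taken from dml2. */
#include <stdio.h>

static double min2(double a, double b) { return a < b ? a : b; }

int main(void)
{
	double dcfclk_mhz = 800.0, return_bus_width = 64.0;	/* hypothetical */
	double dram_bw_mbps = 256000.0;				/* hypothetical */
	double derate_sdp = 0.9, derate_dram = 0.8;		/* hypothetical derate factors */

	double urg_sdp  = dcfclk_mhz * return_bus_width * derate_sdp;
	double urg_dram = dram_bw_mbps * derate_dram;
	double urg_min  = min2(urg_sdp, urg_dram);		/* same min-of-the-two step as above */

	printf("urg_sdp=%f urg_dram=%f urg_min=%f\n", urg_sdp, urg_dram, urg_min);
	return 0;
}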
@@ -2866,13 +2831,13 @@ static void calculate_avg_bandwidth_required(
// SysActive and SVP Prefetch AVG bandwidth Check
for (k = 0; k < num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
#endif
sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
@@ -2889,10 +2854,10 @@ static void calculate_avg_bandwidth_required(
avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
#endif
}
}
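
The accumulation above (per-plane read bandwidth scaled by DCC and MALL overhead factors, plus cursor bandwidth) can be sketched in isolation as follows. The plane count, factor values, and the way the DCC and MALL factors are combined are assumptions made only for the example.

/* Minimal sketch of the per-plane average-bandwidth accumulation above.
 * Two hypothetical planes; overhead factors are illustrative only. */
#include <stdio.h>

int main(void)
{
	double read_bw_luma[2]       = { 3000.0, 1500.0 };	/* MB/s, hypothetical */
	double read_bw_chroma[2]     = { 1500.0,    0.0 };
	double cursor_bw[2]          = {   50.0,    0.0 };
	double dcc_overhead_p0[2]    = { 1.02, 1.00 };		/* DCC DRAM overhead, plane 0 */
	double dcc_overhead_p1[2]    = { 1.02, 1.00 };		/* DCC DRAM overhead, plane 1 */
	double mall_dram_overhead[2] = { 1.0, 2.0 };		/* 2.0 when MALL prefetch applies */
	double avg_dram_required = 0.0;

	for (int k = 0; k < 2; k++) {
		/* combining MALL and DCC factors multiplicatively is an assumption here */
		double p0 = mall_dram_overhead[k] * dcc_overhead_p0[k];
		double p1 = mall_dram_overhead[k] * dcc_overhead_p1[k];

		avg_dram_required += p0 * read_bw_luma[k] + p1 * read_bw_chroma[k] + cursor_bw[k];
	}
	printf("avg DRAM bandwidth required = %f MB/s\n", avg_dram_required);
	return 0;
}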
@@ -3067,10 +3032,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->MaxNumSwathY[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
#endif
p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
@@ -3078,8 +3043,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
#endif
if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
p->PTEBufferSizeNotExceeded[k] = true;
@@ -3091,18 +3056,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
#ifdef __DML_VBA_DEBUG__
if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
}
#endif
}
@@ -3133,8 +3098,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
p->DCCMetaBufferSizeNotExceeded[k] = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
#endif
p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
(dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
@@ -3157,9 +3122,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->DCCMetaBufferSizeNotExceeded[k] = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
#endif
}
@@ -3196,20 +3161,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->dpte_row_bw[k],
&p->meta_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
#endif
}
}
@@ -3244,19 +3209,19 @@ static double CalculateUrgentLatency(
}
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
} else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
}
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
#endif
return urgent_latency;
}
@@ -3283,18 +3248,18 @@ static double CalculateTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
#endif
@@ -3321,14 +3286,14 @@ static double CalculateMetaTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
#endif
@@ -3345,7 +3310,6 @@ static void calculate_cursor_req_attributes(
unsigned int *cursor_bytes_per_chunk,
unsigned int *cursor_bytes)
{
- unsigned int cursor_pitch = 0;
unsigned int cursor_bytes_per_req = 0;
unsigned int cursor_width_bytes = 0;
unsigned int cursor_height = 0;
@@ -3353,10 +3317,6 @@ static void calculate_cursor_req_attributes(
//SW determines the cursor pitch to support the maximum cursor_width that will be used but the following restrictions apply.
//- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
//- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
//The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
@@ -3396,8 +3356,8 @@ static void calculate_cursor_req_attributes(
*cursor_lines_per_chunk = 1;
} else {
if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_assert(0);
+ DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_ASSERT(0);
}
}
@@ -3408,15 +3368,15 @@ static void calculate_cursor_req_attributes(
cursor_height = cursor_width;
*cursor_bytes = *cursor_bytes_per_line * cursor_height;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
+ DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width);
+ DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
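
The cursor_pitch value that used to live in a local variable (and now only appears inside the log statement above) is a power-of-two round-up of the cursor width: 256 pixels for 2 bpp, otherwise the next power of two at or above cursor_width. A standalone version of just that rounding, with invented test values, looks like this.

/* Sketch of the cursor pitch rounding described in the comments above. */
#include <stdio.h>
#include <math.h>

static unsigned int cursor_pitch_pixels(int cursor_bpp, unsigned int cursor_width)
{
	if (cursor_bpp == 2)
		return 256;	/* fixed by the 64B minimum request size */
	return 1u << (unsigned int)ceil(log2((double)cursor_width));
}

int main(void)
{
	printf("%u\n", cursor_pitch_pixels(32, 65));	/* -> 128 */
	printf("%u\n", cursor_pitch_pixels(32, 64));	/* -> 64  */
	printf("%u\n", cursor_pitch_pixels(2, 100));	/* -> 256 */
	return 0;
}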
@@ -3440,20 +3400,20 @@ static void calculate_cursor_urgent_burst_factor(
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorCursor = 0;
+ *UrgentBurstFactorCursor = 1;
} else {
*NotEnoughUrgentLatencyHiding = 0;
*UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3488,22 +3448,22 @@ static void CalculateUrgentBurstFactor(
*UrgentBurstFactorChroma = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
- DML2_ASSERT(VRatio > 0);
+ DML_ASSERT(VRatio > 0);
LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
DETBufferSizeInTimeLuma = math_floor2(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorLuma = 0;
+ *UrgentBurstFactorLuma = 1;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
@@ -3514,24 +3474,23 @@ static void CalculateUrgentBurstFactor(
DETBufferSizeInTimeChroma = math_floor2(LinesInDETChroma, SwathHeightC) * LineTime / VRatioC;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorChroma = 0;
+ *UrgentBurstFactorChroma = 1;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
-
}
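
The change from 0 to 1 in the not-enough-latency-hiding branches above matters because the burst factor multiplies required bandwidth elsewhere; returning 0 would erase that bandwidth term, whereas 1.0 keeps it at its nominal value. A simplified standalone version of the DET-based calculation follows; all input values are invented, and the helper name is not the driver's.

/* Sketch of the urgent burst factor above: time the DET can hide, divided by
 * that time minus the urgent latency.  When the buffer cannot hide the latency
 * at all, fall back to 1.0 (nominal bandwidth), as the patch now does. */
#include <stdio.h>
#include <math.h>

static double urgent_burst_factor(double det_bytes, double byte_per_pixel,
				  double swath_width, double swath_height,
				  double line_time_us, double vratio,
				  double urgent_latency_us, int *not_enough_hiding)
{
	double lines_in_det = det_bytes / byte_per_pixel / swath_width;
	/* floor to a whole number of swaths, as math_floor2() does above */
	double det_time_us = floor(lines_in_det / swath_height) * swath_height
			     * line_time_us / vratio;

	if (det_time_us - urgent_latency_us <= 0.0) {
		*not_enough_hiding = 1;
		return 1.0;	/* was 0 before the change above */
	}
	*not_enough_hiding = 0;
	return det_time_us / (det_time_us - urgent_latency_us);
}

int main(void)
{
	int not_enough;
	double f = urgent_burst_factor(98304, 4, 1920, 4, 7.4, 1.0, 12.0, &not_enough);

	printf("factor=%f not_enough=%d\n", f, not_enough);	/* -> ~1.156, 0 */
	return 0;
}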
-static void CalculateDCFCLKDeepSleep(
+static void CalculateDCFCLKDeepSleepTdlut(
const struct dml2_display_cfg *display_cfg,
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
@@ -3546,6 +3505,10 @@ static void CalculateDCFCLKDeepSleep(
double ReadBandwidthChroma[],
unsigned int ReturnBusWidth,
+ double dispclk,
+ unsigned int tdlut_bytes_to_deliver[],
+ double prefetch_swath_time_us[],
+
// Output
double *DCFClkDeepSleep)
{
@@ -3580,9 +3543,25 @@ static void CalculateDCFCLKDeepSleep(
}
DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
+ // adjust for 3dlut delivery time
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
+ double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
+
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+
+ // increase the deepsleep dcfclk to match the original dispclk throughput rate
+ if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], tdlut_required_deepsleep_dcfclk);
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], dispclk / 4.0);
+ }
+ }
+
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
@@ -3593,16 +3572,63 @@ static void CalculateDCFCLKDeepSleep(
*DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+}
+
+static noinline_for_stack void CalculateDCFCLKDeepSleep(
+ const struct dml2_display_cfg *display_cfg,
+ unsigned int NumberOfActiveSurfaces,
+ unsigned int BytePerPixelY[],
+ unsigned int BytePerPixelC[],
+ unsigned int SwathWidthY[],
+ unsigned int SwathWidthC[],
+ unsigned int DPPPerSurface[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double Dppclk[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ unsigned int ReturnBusWidth,
+
+ // Output
+ double *DCFClkDeepSleep)
+{
+ double zero_double[DML2_MAX_PLANES];
+ unsigned int zero_integer[DML2_MAX_PLANES];
+
+ memset(zero_double, 0, DML2_MAX_PLANES * sizeof(double));
+ memset(zero_integer, 0, DML2_MAX_PLANES * sizeof(unsigned int));
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ NumberOfActiveSurfaces,
+ BytePerPixelY,
+ BytePerPixelC,
+ SwathWidthY,
+ SwathWidthC,
+ DPPPerSurface,
+ PSCL_THROUGHPUT,
+ PSCL_THROUGHPUT_CHROMA,
+ Dppclk,
+ ReadBandwidthLuma,
+ ReadBandwidthChroma,
+ ReturnBusWidth,
+ 0,
+ zero_integer, //tdlut_bytes_to_deliver,
+ zero_double, //prefetch_swath_time_us,
+
+ // Output
+ DCFClkDeepSleep);
}
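
The new CalculateDCFCLKDeepSleep above is a thin wrapper that keeps the old call signature by forwarding zeroed tdlut inputs to the extended CalculateDCFCLKDeepSleepTdlut. A toy version of the same keep-the-old-entry-point pattern is below; the function names, plane data and the simplified per-surface rate are invented for the example (the dispclk/4 clamp from the real change is omitted).

/* Toy version of the wrapper pattern above: the *_tdlut variant takes the
 * extra 3D-LUT inputs, and the legacy-named function forwards zeros so that
 * existing callers are untouched. */
#include <stdio.h>
#include <string.h>

#define MAX_PLANES 8

static void calc_deepsleep_clk_tdlut(int nplanes, const double pixel_rate_mhz[],
				     const unsigned int tdlut_bytes[],
				     const double swath_time_us[], double *clk_mhz)
{
	*clk_mhz = 8.0;					/* floor value, as above */
	for (int k = 0; k < nplanes; k++) {
		double per_surface = pixel_rate_mhz[k] / 16.0;

		if (tdlut_bytes[k] > 0 && swath_time_us[k] > 0.0) {
			/* bytes over a 64B interface within the prefetch swath time */
			double tdlut_clk = tdlut_bytes[k] / 64.0 / swath_time_us[k];

			if (tdlut_clk > per_surface)
				per_surface = tdlut_clk;
		}
		if (per_surface > *clk_mhz)
			*clk_mhz = per_surface;
	}
}

static void calc_deepsleep_clk(int nplanes, const double pixel_rate_mhz[], double *clk_mhz)
{
	unsigned int zero_bytes[MAX_PLANES];
	double zero_time[MAX_PLANES];

	memset(zero_bytes, 0, sizeof(zero_bytes));
	memset(zero_time, 0, sizeof(zero_time));
	calc_deepsleep_clk_tdlut(nplanes, pixel_rate_mhz, zero_bytes, zero_time, clk_mhz);
}

int main(void)
{
	double rates[2] = { 594.0, 148.5 }, clk;

	calc_deepsleep_clk(2, rates, &clk);
	printf("deepsleep clk = %f MHz\n", clk);	/* -> 37.125 */
	return 0;
}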
static double CalculateWriteBackDelay(
@@ -3643,23 +3669,23 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
unsigned int vblank_actual = timing->v_total - timing->v_active;
unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
- unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
- unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
+ unsigned int vblank_avail = (timing->vblank_nom == 0) ? vblank_nom_default_in_line : (unsigned int)timing->vblank_nom;
vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
if (timing->interlaced && !ptoi_supported)
- max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
+ max_vstartup_lines = (unsigned int)(math_floor2((vblank_size - 1) / 2.0, 1.0));
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
+ DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom);
+ DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
+ DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us);
+ DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
+ DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
+ DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
+ max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
}
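
With the change above, vblank_nom is used directly when non-zero (it is no longer clamped to the nominal default), interlaced timings lose one extra line before the halving, and the result is capped at DML_MAX_VSTARTUP_START. A standalone sketch of the resulting calculation follows; the cap value and the timing numbers are invented, and the default vblank is passed in already converted to lines.

/* Sketch of the max-vstartup calculation after the change above. */
#include <stdio.h>
#include <math.h>

#define MAX_VSTARTUP_START 2047u	/* stand-in for DML_MAX_VSTARTUP_START */

static unsigned int max_vstartup_lines(unsigned int v_total, unsigned int v_active,
				       unsigned int vblank_nom, unsigned int vblank_nom_default,
				       double line_time_us, double writeback_delay_us,
				       int interlaced, int ptoi_supported)
{
	unsigned int vblank_actual = v_total - v_active;
	unsigned int vblank_avail = (vblank_nom == 0) ? vblank_nom_default : vblank_nom;
	unsigned int vblank_size = vblank_actual < vblank_avail ? vblank_actual : vblank_avail;
	unsigned int lines;

	if (interlaced && !ptoi_supported)
		lines = (unsigned int)floor((vblank_size - 1) / 2.0);
	else
		lines = vblank_size - (unsigned int)fmax(1.0, ceil(writeback_delay_us / line_time_us));

	return lines < MAX_VSTARTUP_START ? lines : MAX_VSTARTUP_START;
}

int main(void)
{
	/* 1080p-like timing: 1125 lines total, 1080 active, nominal vblank of 36 lines */
	printf("%u\n", max_vstartup_lines(1125, 1080, 36, 41, 7.4, 0.0, 0, 1));	/* -> 35 */
	return 0;
}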
@@ -3682,9 +3708,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
const long MAXIMUMCOMPRESSION = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
}
#endif
CalculateSwathWidth(
@@ -3718,15 +3744,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]);
p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
#endif
if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
@@ -3769,11 +3795,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
#endif
*p->ViewportSizeSupport = true;
@@ -3781,7 +3807,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
#endif
if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
p->SwathHeightY[k] = MaximumSwathHeightY[k];
@@ -3828,8 +3854,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -3838,13 +3864,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
*p->ViewportSizeSupport = false;
- dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
- dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
p->ViewportSizeSupportPerSurface[k] = false;
} else {
p->ViewportSizeSupportPerSurface[k] = true;
@@ -3852,35 +3878,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if (p->SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
p->DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
#endif
}
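
The DET split logic above has three cases: a luma-only surface takes the whole DET, roughly balanced luma/chroma swaths split it in half, and a dominant luma swath gets two thirds (rounded down to 1 KB) with the remainder going to chroma. A self-contained version of just that decision, with an invented buffer size and swath sizes, is:

/* Sketch of the DET split above between plane0 (luma) and plane1 (chroma). */
#include <stdio.h>
#include <math.h>

static void split_det(unsigned int det_kbytes, unsigned int swath_bytes_l,
		      unsigned int swath_bytes_c, unsigned int swath_height_c,
		      unsigned int *det_y, unsigned int *det_c)
{
	if (swath_height_c == 0) {			/* no chroma plane: all DET to plane0 */
		*det_y = det_kbytes * 1024;
		*det_c = 0;
	} else if (swath_bytes_l <= 1.5 * swath_bytes_c) {	/* roughly balanced: half/half */
		*det_y = det_kbytes * 1024 / 2;
		*det_c = det_kbytes * 1024 / 2;
	} else {					/* luma dominates: 2/3 vs 1/3, 1KB aligned */
		*det_y = (unsigned int)(floor(det_kbytes * 1024 * 2.0 / 3.0 / 1024.0) * 1024);
		*det_c = det_kbytes * 1024 - *det_y;
	}
}

int main(void)
{
	unsigned int y, c;

	split_det(1536, 30720, 7680, 2, &y, &c);	/* hypothetical 4:2:0 surface */
	printf("DETY=%u DETC=%u\n", y, c);		/* -> 1048576 / 524288 */
	return 0;
}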
@@ -3890,12 +3916,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
(double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
#endif
*p->hw_debug5 = false;
@@ -3910,12 +3936,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
+ *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
*p->hw_debug5 = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
#endif
@@ -4023,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
bool UseDSC,
unsigned int NumberOfDSCSlices,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double DISPCLKRequired,
unsigned int NumberOfDPPRequired,
unsigned int MaxHActiveForDSC,
@@ -4039,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
if (DISPCLKRequired > MaxDispclk)
return false;
- if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
+ if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
return false;
if (are_odm_segments_symmetrical) {
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
@@ -4076,7 +4104,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
return true;
}
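
The widened check above rejects a candidate ODM mode not only when the remaining DPP budget would be exceeded but also when the required pipes no longer fit within MaxNumOPP. A reduced standalone version of that gate is below; the function name and all the limits are invented for illustration.

/* Sketch of the widened pipe-budget check above: the candidate ODM mode must
 * fit the remaining DPP budget and, after this change, the OPP budget too. */
#include <stdbool.h>
#include <stdio.h>

static bool pipes_fit(unsigned int active_dpp, unsigned int active_opp,
		      unsigned int required_pipes,
		      unsigned int max_dpp, unsigned int max_opp)
{
	if (active_dpp + required_pipes > max_dpp)
		return false;
	if (active_opp + required_pipes > max_opp)	/* the newly added OPP check */
		return false;
	return true;
}

int main(void)
{
	/* hypothetical: 2 DPP and 3 OPP already in use, 4:1 combine wants 4 more pipes */
	printf("%d\n", pipes_fit(2, 3, 4, 8, 6));	/* -> 0: OPP budget exceeded */
	printf("%d\n", pipes_fit(2, 2, 2, 8, 6));	/* -> 1 */
	return 0;
}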
-static void CalculateODMMode(
+static noinline_for_stack void CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum dml2_output_format_class OutFormat,
@@ -4085,7 +4113,9 @@ static void CalculateODMMode(
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double PixelClock,
unsigned int NumberOfDSCSlices,
@@ -4107,21 +4137,22 @@ static void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
-
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
+
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
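For HDMI carrying YCbCr 4:2:0 over TMDS, two pixels are packed per TMDS clock, so the effective front-end pixel rate is half the nominal pixel clock. How CalculateRequiredDispclk() consumes the new isTMDS420 flag is not visible in this hunk; the sketch below only illustrates one plausible shape (the halving and the ODM divisors are assumptions):

/* Hypothetical sketch, not the patch's CalculateRequiredDispclk(). */
static double required_dispclk_sketch(enum dml2_odm_mode mode, double pixel_clock_mhz, bool is_tmds_420)
{
	double odm_div = (mode == dml2_odm_mode_combine_4to1) ? 4.0 :
			 (mode == dml2_odm_mode_combine_3to1) ? 3.0 :
			 (mode == dml2_odm_mode_combine_2to1) ? 2.0 : 1.0;

	if (is_tmds_420)
		pixel_clock_mhz /= 2.0; /* assumed: 4:2:0 over TMDS carries two pixels per clock */

	return pixel_clock_mhz / odm_div;
}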
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
+ DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
+ DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
#endif
if (ODMUse == dml2_odm_mode_auto)
DecidedODMMode = DecideODMMode(HActive,
@@ -4154,7 +4185,9 @@ static void CalculateODMMode(
UseDSC,
NumberOfDSCSlices,
TotalNumberOfActiveDPP,
+ TotalNumberOfActiveOPP,
MaxNumDPP,
+ MaxNumOPP,
DISPCLKRequired,
NumberOfDPPRequired,
MaxHActiveForDSC,
@@ -4166,14 +4199,14 @@ static void CalculateODMMode(
*NumberOfDPP = NumberOfDPPRequired;
*RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
+ DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
#endif
}
-static void CalculateOutputLink(
+static noinline_for_stack void CalculateOutputLink(
struct dml2_core_internal_scratch *s,
double PHYCLK,
double PHYCLKD18,
@@ -4213,17 +4246,17 @@ static void CalculateOutputLink(
*OutputRate = dml2_core_internal_output_rate_unknown;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
+ DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
+ DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
+ DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
#endif
{
if (Output == dml2_hdmi) {
@@ -4408,9 +4441,9 @@ static void CalculateOutputLink(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
+ DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
+ DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
+ DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
#endif
}
@@ -4492,17 +4525,17 @@ static unsigned int DSCDelayRequirement(
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
+ DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode);
+ DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
+ DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
+ DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
#endif
return DSCDelayRequirement_val;
@@ -4575,10 +4608,10 @@ static void CalculateSurfaceSizeInMall(
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
+ DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
+ DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
#endif
}
@@ -4595,7 +4628,6 @@ static void calculate_tdlut_setting(
unsigned int tdlut_vmpg_per_frame;
unsigned int tdlut_pte_req_per_frame;
unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
double tdlut_drain_rate;
unsigned int tdlut_mpc_width;
unsigned int tdlut_bytes_per_group_simple;
@@ -4604,6 +4636,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = 0;
*p->tdlut_opt_time = 0;
*p->tdlut_drain_time = 0;
+ *p->tdlut_bytes_to_deliver = 0;
*p->tdlut_bytes_per_group = 0;
*p->tdlut_pte_bytes_per_frame = 0;
*p->tdlut_bytes_per_frame = 0;
@@ -4657,42 +4690,47 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycles is DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
*p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
}
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+ *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ else
+ *p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
-
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
+ *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
+ }
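The new branch keeps tdlut_opt_time from going negative when the cursor buffer alone already covers the 3D LUT frame. A small worked example with illustrative numbers (not taken from the patch):

/* Example values only. */
static double tdlut_opt_time_example(void)
{
	unsigned int tdlut_bytes_per_frame = 98304;      /* bytes */
	unsigned int cursor_buffer_bytes   = 64 * 1024;  /* cursor_buffer_size (KiB) * 1024 */
	double tdlut_drain_rate = 1000.0;                /* bytes per microsecond */

	if (tdlut_bytes_per_frame > cursor_buffer_bytes)
		return (tdlut_bytes_per_frame - cursor_buffer_bytes) / tdlut_drain_rate; /* 32.768 us */
	return 0.0; /* the old unconditional formula would have gone negative here */
}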
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
+
+ DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
+ DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
+ DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
+ DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1));
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
+ DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
@@ -4738,10 +4776,10 @@ static void CalculateTarb(
*Tarb = extra_bytes / ReturnBW;
*Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
#endif
}
@@ -4756,10 +4794,10 @@ static double CalculateTWait(
TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
+ DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
}
@@ -4805,20 +4843,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters(
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
+ DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
+ DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
+ DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
@@ -4841,6 +4879,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
+ double PrefetchBandwidthMax[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4879,11 +4918,11 @@ static double get_urgent_bandwidth_required(
l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
+ bool exclude_this_plane = false;
// Exclude phantom pipe in bw calculation for non svp prefetch state
if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
+ exclude_this_plane = true;
// The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip.
// The qual_row_bw is zero if HostVM is possible and only non-zero and equal to row_bw(i) if immediate flip is not allowed for that pipe.
@@ -4904,19 +4943,20 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
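With the extra flip_and_prefetch_bw_max operand, the per-surface requirement is now the worst case of five candidates rather than four. The same selection written out long-hand (sketch only; the operands are the values computed in the hunk above):

/* Sketch: same result as math_max5() over the five candidate bandwidths. */
static double surface_required_bw_sketch(double vm_row_bw, double flip_and_active_bw,
					 double flip_and_prefetch_bw, double active_and_excess_bw,
					 double flip_and_prefetch_bw_max)
{
	double bw = vm_row_bw;

	if (flip_and_active_bw > bw)
		bw = flip_and_active_bw;
	if (flip_and_prefetch_bw > bw)
		bw = flip_and_prefetch_bw;
	if (active_and_excess_bw > bw)
		bw = active_and_excess_bw;
	if (flip_and_prefetch_bw_max > bw)
		bw = flip_and_prefetch_bw_max;
	return bw;
}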
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
- dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
- dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
- dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
- dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
- dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
#endif
} else {
surface_required_bw[k] = 0.0;
@@ -4925,34 +4965,34 @@ static double get_urgent_bandwidth_required(
l->required_bandwidth_mbps += surface_required_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
- dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
#endif
}
@@ -4976,7 +5016,7 @@ static void CalculateExtraLatency(
double HostVMInefficiencyFactorPrefetch,
unsigned int HostVMMinPageSize,
enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
+ bool max_outstanding_when_urgent_expected,
unsigned int max_outstanding_requests,
unsigned int request_size_bytes_luma[],
unsigned int request_size_bytes_chroma[],
@@ -5024,7 +5064,7 @@ static void CalculateExtraLatency(
if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
- if (max_oustanding_when_urgent_expected)
+ if (max_outstanding_when_urgent_expected)
*ExtraLatency = *ExtraLatency + (ROBBufferSizeInKByte * 1024 - max_outstanding_requests * max_request_size_bytes) / ReturnBW;
} else {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK + RoundTripPingLatencyCycles / FabricClock + ReorderingBytes / ReturnBW;
@@ -5036,19 +5076,19 @@ static void CalculateExtraLatency(
*ExtraLatency_sr = *ExtraLatency_sr + Tarb;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
- dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
+ DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
+ DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
+ DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
#endif
}
@@ -5103,6 +5143,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
+ *p->RequiredPrefetchBWMax = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5114,20 +5155,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
+ DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
+ DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
#endif
CalculateVUpdateAndDynamicMetadataParameters(
p->MaxInterDCNTileRepeaters,
@@ -5173,11 +5214,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
*p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
} else {
*p->NotEnoughTimeForDynamicMetadata = false;
}
@@ -5203,21 +5244,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
+ DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
+ DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
+ DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5229,17 +5270,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
*p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
#endif
if (p->display_cfg->gpuvm_enable) {
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
@@ -5316,6 +5357,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
}
+ /* oto prefetch bw should always be less than total vactive bw */
+ //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
@@ -5326,10 +5370,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
+ /* oto bw needs to be output even if the oto schedule isn't being used, to avoid an ms/mp mismatch.
+ * mp will fail if ms decides to use the equ schedule while mp decides to use the oto schedule
+ * and the required bandwidth increases when going from ms to mp
+ */
+ *p->RequiredPrefetchBWMax = s->prefetch_bw_oto;
+
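Publishing the oto bandwidth through RequiredPrefetchBWMax lets the caller budget against it even when the equ schedule is chosen, so a later schedule flip between ms and mp cannot raise the requirement past what was validated. A hypothetical caller-side use (names assumed, not from the patch):

/* Hypothetical: vote with the larger of the chosen-schedule bw and the exported oto bw. */
static double prefetch_bw_for_vote(double required_prefetch_bw, double required_prefetch_bw_max)
{
	return required_prefetch_bw_max > required_prefetch_bw ?
	       required_prefetch_bw_max : required_prefetch_bw;
}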
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
- dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
- dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
#endif
if (p->display_cfg->gpuvm_enable == true) {
@@ -5339,9 +5389,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
s->Tvm_oto = s->Tvm_trips_rounded;
@@ -5353,9 +5403,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
} else
s->Tr0_oto = s->LineTime / 4.0;
@@ -5365,11 +5415,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
#ifdef DML_GLOBAL_PREFETCH_CHECK
- dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
if (p->impacted_dst_y_pre > 0) {
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
}
#endif
*p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
@@ -5398,72 +5448,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
- double Tpre = s->dst_y_prefetch_equ * s->LineTime;
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
+ DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
+ DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
+#endif
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
*p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
+ DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
+ DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
*p->dst_y_per_vm_vblank = 0;
@@ -5502,19 +5551,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw1 = 0;
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
(*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
+ DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
+ DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
@@ -5526,10 +5575,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw2 = 0;
- dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
- dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
@@ -5540,10 +5589,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw3 = 0;
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
@@ -5553,17 +5602,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
- dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
+ DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
#endif
{
bool Case1OK = false;
@@ -5582,14 +5631,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
- dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
if (s->prefetch_bw1 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
double row_transfer_time = total_row_bytes / s->prefetch_bw1;
- dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
@@ -5602,8 +5651,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw2 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
double row_transfer_time = total_row_bytes / s->prefetch_bw2;
- dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
@@ -5615,8 +5664,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw3 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
double row_transfer_time = total_row_bytes / s->prefetch_bw3;
- dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
@@ -5636,10 +5685,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK);
+ DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK);
+ DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
#endif
if (s->prefetch_bw_equ > 0) {
@@ -5659,12 +5708,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
s->Tvm_equ = 0;
s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
+ DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
+ DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
@@ -5675,7 +5724,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
} else {
@@ -5687,11 +5736,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+
+ /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming.
+ * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data
+ * bandwidth may end up higher than what was calculated in mode support.
+ */
+ *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
@@ -5700,32 +5755,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
+ *p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
+ DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- dml2_assert(*p->dst_y_prefetch < 64);
+ DML_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
*p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
*p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
#endif
if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
@@ -5733,13 +5790,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
#endif
}
@@ -5747,22 +5804,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
#endif
if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
#endif
}
@@ -5770,36 +5827,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
+ DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
+ DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
+ DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
+ DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
__func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
- __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5820,18 +5875,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_vm_bw = 0;
} else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
@@ -5840,14 +5895,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5867,12 +5922,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_vmrow_bw = 0;
}
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
return s->NoTimeToPrefetch;
}
@@ -5909,7 +5964,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- dml2_assert(max_idx >= 0);
+ DML_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
@@ -5928,7 +5983,7 @@ static double calculate_impacted_Tsw(unsigned int exclude_plane_idx, unsigned in
}
// a global check against the aggregate effect of the per plane prefetch schedule
-static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
+static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *p)
{
struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals *s = &scratch->CheckGlobalPrefetchAdmissibility_locals;
@@ -5941,12 +5996,12 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
// worst case if the rob and cdb is fully hogged
s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
- dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
- dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
- dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
- dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
#endif
// calculate the return impact from each plane, request is 256B per dcfclk
@@ -5967,12 +6022,12 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
- dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
- dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
#endif
if (s->src_swath_bytes_c[i] > 0) { // dual_plane
@@ -5983,10 +6038,10 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
- dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
#endif
}
@@ -5994,9 +6049,9 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
- dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
- dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
#endif
// clamping to worst case delay which is one which occupy the full rob+cdb
if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
@@ -6013,7 +6068,7 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+ DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
#endif
}
@@ -6024,8 +6079,8 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
*p->recalc_prefetch_schedule = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
- dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
#endif
}
} else {
@@ -6035,8 +6090,8 @@ static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
- dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+ DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
#endif
return s->prefetch_global_check_passed;
@@ -6054,8 +6109,8 @@ static void calculate_peak_bandwidth_required(
memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
#endif
for (unsigned int k = 0; k < p->num_active_planes; ++k) {
@@ -6084,6 +6139,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWMax
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6120,6 +6176,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWMax
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6156,6 +6213,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6192,6 +6250,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6228,6 +6287,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6246,12 +6306,12 @@ static void calculate_peak_bandwidth_required(
p->surface_peak_required_bw[m][n]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- dml2_assert(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
@@ -6313,18 +6373,18 @@ static void check_urgent_bandwidth_support(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
+ DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]);
}
@@ -6345,14 +6405,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal
flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
#endif
return flip_bw_available_mbps;
@@ -6377,28 +6437,28 @@ static void calculate_immediate_flip_bandwidth_support(
*flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- dml2_assert(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
*flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
}
@@ -6448,27 +6508,27 @@ static void CalculateFlipSchedule(
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
#endif
if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
@@ -6495,9 +6555,9 @@ static void CalculateFlipSchedule(
l->min_row_time = l->min_row_height * LineTime / VRatio;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- dml2_assert(l->min_row_time > 0);
+ DML_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculation the flip bw requirement with worst case flip time
@@ -6518,20 +6578,20 @@ static void CalculateFlipSchedule(
l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6539,8 +6599,8 @@ static void CalculateFlipSchedule(
(l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#endif
}
@@ -6552,13 +6612,12 @@ static void CalculateFlipSchedule(
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
- double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
- dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
+ DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes);
#endif
if (l->ImmediateFlipBW == 0) {
l->Tvm_flip = 0;
@@ -6573,11 +6632,11 @@ static void CalculateFlipSchedule(
LineTime / 4.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
#endif
*dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
*dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
@@ -6610,14 +6669,14 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
+ DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
}
@@ -6635,7 +6694,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
#endif
p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
@@ -6654,20 +6713,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
@@ -6700,11 +6759,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
+ DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
#endif
s->TotalPixelBW = 0.0;
@@ -6735,11 +6794,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
#endif
s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
@@ -6842,16 +6901,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
#endif
p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
@@ -6866,10 +6925,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
#endif
}
}
@@ -6891,10 +6950,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
+ DML_LOG_VERBOSE("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
#endif
}
@@ -6913,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index;
- dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us /
+ dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] /
((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total /
(double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0));
@@ -6941,7 +7000,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
}
}
-static void calculate_vactive_det_fill_latency(
+static noinline_for_stack void calculate_vactive_det_fill_latency(
const struct dml2_display_cfg *display_cfg,
unsigned int num_active_planes,
unsigned int bytes_required_l[],
@@ -7010,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required(
excess_vactive_fill_bw_l[plane_index] = 0.0;
excess_vactive_fill_bw_c[plane_index] = 0.0;
- if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) {
- excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
- excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
+ if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) {
+ excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
+ excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
}
@@ -7040,7 +7099,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -7052,31 +7111,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
break;
}
}
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
return index;
}
static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
- dml2_assert(clk_entry_found);
+ if (!clk_entry_found)
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
@@ -7116,10 +7174,10 @@ static void calculate_hostvm_inefficiency_factor(
if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
*HostVMInefficiencyFactorPrefetch = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
#endif
}
}
@@ -7233,30 +7291,660 @@ static void calculate_pstate_keepout_dst_lines(
}
}
+static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct dml2_display_cfg *display_cfg)
+{
+ struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
+ struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
+ struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
+ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
+
+ double min_return_bw_for_latency;
+ unsigned int k;
+
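+	// TimeCalc: per-pipe calculation time in microseconds, computed here as 24 cycles of DCFCLK deep-sleep (dcfclk_deepsleep is in MHz)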
+ mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
+
+ calculate_hostvm_inefficiency_factor(
+ &s->HostVMInefficiencyFactor,
+ &s->HostVMInefficiencyFactorPrefetch,
+
+ display_cfg->gpuvm_enable,
+ display_cfg->hostvm_enable,
+ mode_lib->ip.remote_iommu_outstanding_translations,
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
+ mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
+
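+	// Count the planes that use a 3D LUT and derive each plane's tdlut scheduling terms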
+ mode_lib->ms.Total3dlutActive = 0;
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
+ mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
+
+ // Calculate tdlut schedule related terms
+ calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
+ calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
+ calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
+ calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
+ calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
+ calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
+ calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
+ calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
+
+ // output
+ calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
+ calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
+ calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
+
+ calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
+ }
+
+ min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+
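+	// For the DCN3.x-style QoS model, size the reordering bytes as channel count times the worst-case per-channel out-of-order return bytes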
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
+ CalculateExtraLatency(
+ display_cfg,
+ mode_lib->ip.rob_buffer_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
+ mode_lib->ms.DCFCLK,
+ mode_lib->ms.FabricClock,
+ mode_lib->ip.pixel_chunk_size_kbytes,
+ min_return_bw_for_latency,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.dpte_group_bytes,
+ s->tdlut_bytes_per_group,
+ s->HostVMInefficiencyFactor,
+ s->HostVMInefficiencyFactorPrefetch,
+ mode_lib->soc.hostvm_min_page_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_type,
+ !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.request_size_bytes_luma,
+ mode_lib->ms.support.request_size_bytes_chroma,
+ mode_lib->ip.meta_chunk_size_kbytes,
+ mode_lib->ip.dchub_arb_to_ret_delay,
+ mode_lib->ms.TripToMemory,
+ mode_lib->ip.hostvm_mode,
+
+ // output
+ &mode_lib->ms.ExtraLatency,
+ &mode_lib->ms.ExtraLatency_sr,
+ &mode_lib->ms.ExtraLatencyPrefetch);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
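+	// The loop below builds the per-plane prefetch schedule; if the global prefetch admissibility check requests it, the schedule is recalculated once more with the impacted dst_y_pre values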
+ do {
+ mode_lib->ms.support.PrefetchSupported = true;
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
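+			// line time in microseconds: h_total divided by the pixel clock in MHz (pixel_clock_khz / 1000)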
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
+
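+			// TWait is derived from the reserved vblank time, urgent latency and trip-to-memory time; non-phantom DRR pipes also include the g6 temp-read blackout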
+ mode_lib->ms.TWait[k] = CalculateTWait(
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0);
+
+ myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
+ myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
+ myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
+ myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
+ myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
+ myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
+ myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
+ myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
+ myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
+ myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
+ myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
+ myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
+ myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
+ myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
+ myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
+ myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
+ myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
+ myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ myPipe->ODMMode = mode_lib->ms.ODMMode[k];
+ myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
+ myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
+ myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
+ myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
+#endif
+ CalculatePrefetchSchedule_params->display_cfg = display_cfg;
+ CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
+ CalculatePrefetchSchedule_params->myPipe = myPipe;
+ CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
+ CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
+ CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
+ CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
+ CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
+ CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
+ CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
+ CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
+ CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
+ CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
+ CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
+ CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
+ CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
+ CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
+ CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
+ CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
+ CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
+ CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
+ CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
+ CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
+ CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
+ CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
+ CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
+ CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
+ CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
+ CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
+ CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
+ CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
+
+ // output
+ CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
+ CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
+ CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
+ CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
+ CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k];
+ CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
+ CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
+ CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
+ CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
+ CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
+ CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
+ CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
+ CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
+ CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
+ CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
+ CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
+ CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
+
+ mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+
+ mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
+ } // for k num_planes
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.dst_y_prefetch[k] < 2.0
+ || mode_lib->ms.LinesForVM[k] >= 32.0
+ || mode_lib->ms.LinesForDPTERow[k] >= 16.0
+ || mode_lib->ms.NoTimeForPrefetch[k] == true
+ || s->DSTYAfterScaler[k] > 8) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
+ }
+ }
+
+ mode_lib->ms.support.DynamicMetadataSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
+ mode_lib->ms.support.DynamicMetadataSupported = false;
+ }
+ }
+
+ mode_lib->ms.support.VRatioInPrefetchSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
+ mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
+ }
+ }
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
+		// Only do the urgent-vs-prefetch bandwidth check, flip schedule check, and power-saving feature support check if the prefetch schedule check is ok
+ if (mode_lib->ms.support.PrefetchSupported) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ // Calculate Urgent burst factor for prefetch
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
+#endif
+ CalculateUrgentBurstFactor(
+ &display_cfg->plane_descriptors[k],
+ mode_lib->ms.swath_width_luma_ub[k],
+ mode_lib->ms.swath_width_chroma_ub[k],
+ mode_lib->ms.SwathHeightY[k],
+ mode_lib->ms.SwathHeightC[k],
+ s->line_times[k],
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.VRatioPreY[k],
+ mode_lib->ms.VRatioPreC[k],
+ mode_lib->ms.BytePerPixelInDETY[k],
+ mode_lib->ms.BytePerPixelInDETC[k],
+ mode_lib->ms.DETBufferSizeY[k],
+ mode_lib->ms.DETBufferSizeC[k],
+ /* Output */
+ &mode_lib->ms.UrgentBurstFactorLumaPre[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+
+ // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
+ // assume flip bw is 0 at this point
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ mode_lib->ms.final_flip_bw[k] = 0;
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 0;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ // Check urg peak bandwidth against available urg bw
+			// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
+ check_urgent_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth
+ &s->dummy_single[1], // double* frac_urg_bandwidth_mall
+ &mode_lib->ms.support.UrgVactiveBandwidthSupport,
+ &mode_lib->ms.support.PrefetchBandwidthSupported,
+
+ mode_lib->soc.mall_allocated_for_dcn_mbytes,
+ mode_lib->ms.support.non_urg_bandwidth_required,
+ mode_lib->ms.support.urg_vactive_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
+ DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+ }
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
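+			// floor the estimated urgent bandwidth at 10 * 1024 MB/s (roughly 10 GB/s) before deriving the DCFCLK estimate below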
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
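+			// estimated DCFCLK (MHz) needed to return that bandwidth: bandwidth / return bus width, scaled up by the system-active urgent DCFCLK derate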
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+			// if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
+
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+
+ }
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+	} else { // if prefetch is not supported, assume iflip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+ s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
+ s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
+ s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
+ s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
+ s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
+ s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.USRRetrainingLatency = 0;
+ s->mSOCParameters.SMNLatency = 0;
+ s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
+
+ CalculateWatermarks_params->display_cfg = display_cfg;
+ CalculateWatermarks_params->USRRetrainingRequired = false;
+ CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+ CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
+ CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
+ CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
+ CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
+ CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
+ CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
+ CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
+ CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
+ CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
+ CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
+ CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
+ CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
+ CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
+ CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
+ CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
+ CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
+ CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
+ CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
+ CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
+ CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
+ CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
+ CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
+ CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
+ CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
+
+ // Output
+ CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
+ CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
+ CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
+ CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
+ CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
+ CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
+
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__);
+
+}
+
+
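For readers unfamiliar with the unit conventions in this block: the DCN4x QoS fields expressed as FCLK cycle counts become microseconds once divided by the fabric clock in MHz, which is what the df_response_time_us assignment above does. A minimal sketch of that conversion, with made-up numbers:

/*
 * Illustrative only: the DF QoS response time above is a cycle count
 * specified at FCLK, so dividing by the fabric clock in MHz yields
 * microseconds, e.g. 300 cycles / 1500 MHz = 0.2 us. Numbers are made up.
 */
static double cycles_at_fclk_to_us(double fclk_cycles, double fclk_mhz)
{
	return fclk_cycles / fclk_mhz; /* cycles / (cycles per us) = us */
}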
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-#if defined(__DML_VBA_DEBUG__)
- double old_ReadBandwidthLuma;
- double old_ReadBandwidthChroma;
-#endif
double outstanding_latency_us = 0;
- double min_return_bw_for_latency;
struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
-#endif
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params;
unsigned int k, m, n;
@@ -7272,9 +7960,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
+ mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000;
mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
+ mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
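The clock setup above converts the kHz entries of the minimum-clock table into MHz, and the dispclk/dppclk (and later dtbclk) ceilings now come from a separate max_ss_clocks_khz table rather than max_clocks_khz; the "ss" presumably denotes spread-spectrum-adjusted limits, though that is an assumption here. A minimal sketch of the conversion pattern, with simplified stand-in types:

/*
 * Simplified stand-ins, not the real DML structures: the point is only the
 * kHz -> MHz conversion applied to the (assumed spread-spectrum-adjusted)
 * clock ceilings.
 */
struct example_clk_limits_khz {
	unsigned int dispclk;
	unsigned int dppclk;
};

static double khz_to_mhz(unsigned int khz)
{
	return (double)khz / 1000.0; /* e.g. 1950000 kHz -> 1950.0 MHz */
}

static void load_clock_limits_mhz(const struct example_clk_limits_khz *ss_limits,
				  double *max_dispclk_mhz, double *max_dppclk_mhz)
{
	*max_dispclk_mhz = khz_to_mhz(ss_limits->dispclk);
	*max_dppclk_mhz = khz_to_mhz(ss_limits->dppclk);
}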
@@ -7282,25 +7970,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
+ DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
+ DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
+ DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7402,12 +8090,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
#ifdef __DML_VBA_DEBUG__
- old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
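The old_ReadBandwidthLuma value logged above is the swath width in pixels times bytes per pixel, divided by the line time (h_total over the pixel clock in MHz), scaled by the vertical ratio. A self-contained worked example with illustrative numbers:

/*
 * Worked example (illustrative numbers only) of the per-plane luma read
 * bandwidth in the debug print above. bytes/us is numerically MB/s.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double swath_width_pixels = 3840.0; /* hypothetical single-DPP swath */
	double byte_per_pixel = 4.0;        /* e.g. 32bpp ARGB */
	double h_total = 4400.0;
	double pixel_clock_mhz = 594.0;
	double v_ratio = 1.0;

	double line_time_us = h_total / pixel_clock_mhz;            /* ~7.41 us  */
	double read_bw_bytes_per_us = swath_width_pixels *
		ceil(byte_per_pixel) / line_time_us * v_ratio;       /* ~2073.6   */

	printf("line_time_us = %f, read_bw = %f bytes/us (~%.2f GB/s)\n",
	       line_time_us, read_bw_bytes_per_us, read_bw_bytes_per_us / 1000.0);
	return 0;
}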
@@ -7527,13 +8213,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
}
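The clamp above takes the smaller of the hardware-supported maximum swath width and the width that still fits in the line buffer. A trivial sketch of that choice, with hypothetical names:

/*
 * Sketch only: the usable swath width is limited both by what the hardware
 * supports and by what the line buffer can hold per line.
 */
static double usable_swath_width(double hw_max_swath_width,
				 double line_buffer_limited_width)
{
	return hw_max_swath_width < line_buffer_limited_width ?
		hw_max_swath_width : line_buffer_limited_width;
}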
/* Cursor Support Check */
@@ -7570,11 +8256,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
mode_lib->ms.support.PitchSupport = false;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
#endif
}
@@ -7606,11 +8292,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
}
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
@@ -7680,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
mode_lib->ms.support.TotalAvailablePipesSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
@@ -7715,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
false, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -7734,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
true, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -7792,8 +8483,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
#endif
 	// ensure the number of DSC slices is an integer multiple based on the ODM mode
@@ -7809,9 +8500,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0);
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.DSCSlicesODMModeSupported) {
- dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
- dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
}
#endif
} else {
@@ -7838,40 +8529,50 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
+ mode_lib->ms.NoOfOPP[k] = 1;
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 4;
+ mode_lib->ms.NoOfOPP[k] = 4;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 3;
+ mode_lib->ms.NoOfOPP[k] = 3;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 2;
+ mode_lib->ms.NoOfOPP[k] = 2;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
+ DML_LOG_VERBOSE("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
}
} else {
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
#endif
}
+ mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
+ mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
+ }
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
mode_lib->ms.support.TotalAvailablePipesSupport = false;
+ if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
+ mode_lib->ms.support.TotalAvailablePipesSupport = false;
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
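After the per-plane pipe assignment above (ODM combine, MPC combine, or a single pipe), the DPP and OPP totals are re-accumulated from scratch and checked against the IP limits; exceeding either budget clears TotalAvailablePipesSupport. A minimal sketch of that check, with simplified stand-ins for the DML structures:

/*
 * Simplified stand-in for the capacity check above: recount DPP/OPP usage
 * across planes and compare against the per-IP maximums.
 */
#include <stdbool.h>

static bool pipes_fit(const unsigned int *dpp_per_plane,
		      const unsigned int *opp_per_plane,
		      unsigned int num_planes,
		      unsigned int max_num_dpp,
		      unsigned int max_num_opp)
{
	unsigned int total_dpp = 0, total_opp = 0;

	for (unsigned int k = 0; k < num_planes; k++) {
		total_dpp += dpp_per_plane[k];
		total_opp += opp_per_plane[k];
	}
	return total_dpp <= max_num_dpp && total_opp <= max_num_opp;
}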
@@ -8036,7 +8737,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout);
- if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
+ if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
} else {
@@ -8065,7 +8766,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->DSCFormatFactor = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
#endif
if (mode_lib->ms.RequiresDSC[k] == true) {
s->PixelClockBackEndFactor = 3.0;
@@ -8083,10 +8784,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
#endif
}
}
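The flag set in this block reduces to a comparison: if the DSCCLK a stream needs exceeds the platform maximum, DSC cannot run at this state. A sketch of just that comparison (the derivation of the required clock from the format factor and ODM split is omitted):

/*
 * Sketch only: the support flag is the outcome of comparing the required
 * DSCCLK against the exposed maximum.
 */
static bool dscclk_required_more_than_supported(double required_dscclk_mhz,
						double max_dscclk_mhz)
{
	return required_dscclk_mhz > max_dscclk_mhz;
}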
@@ -8321,13 +9022,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
#endif
/* VActive bytes to fetch for UCLK P-State */
@@ -8350,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
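As a rough mental model (not the exact DML formula), the bytes a plane must have prefetched to ride through the dram_clk_change_blackout_us window scale with the latency to hide times its active read bandwidth; the real helper above likely also accounts for swath and row fetch granularity. A heavily simplified approximation:

/*
 * Rough, illustrative approximation only: data already in flight must cover
 * the blackout window, so bytes ~ latency_to_hide * active read bandwidth.
 */
static double approx_bytes_to_hide_latency(double latency_to_hide_us,
					   double vactive_read_bw_bytes_per_us)
{
	return latency_to_hide_us * vactive_read_bw_bytes_per_us;
}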
@@ -8362,8 +9063,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_excess_vactive_bandwidth_required(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->ms.excess_vactive_fill_bw_l,
mode_lib->ms.excess_vactive_fill_bw_c);
@@ -8400,7 +9101,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
@@ -8429,9 +9130,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
#endif
CalculateUrgentBurstFactor(
@@ -8503,7 +9204,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
#endif
/* Immediate Flip and MALL parameters */
@@ -8552,16 +9253,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
(s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
+ DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8601,10 +9301,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
#endif
}
@@ -8620,8 +9320,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
#endif
}
}
@@ -8682,11 +9382,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_mcache_setting_params->num_mcaches_l = &mode_lib->ms.num_mcaches_l[k];
calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->ms.mcache_row_bytes_l[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_l = &mode_lib->ms.mcache_row_bytes_per_channel_l[k];
calculate_mcache_setting_params->mcache_offsets_l = mode_lib->ms.mcache_offsets_l[k];
calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->ms.mcache_shift_granularity_l[k];
calculate_mcache_setting_params->num_mcaches_c = &mode_lib->ms.num_mcaches_c[k];
calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->ms.mcache_row_bytes_c[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_c = &mode_lib->ms.mcache_row_bytes_per_channel_c[k];
calculate_mcache_setting_params->mcache_offsets_c = mode_lib->ms.mcache_offsets_c[k];
calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->ms.mcache_shift_granularity_c[k];
@@ -8765,7 +9467,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
}
}
@@ -8774,613 +9476,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
mode_lib->ms.support.AvgBandwidthSupport = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
#endif
}
}
}
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- s->impacted_dst_y_pre[k] = 0;
-
- s->recalc_prefetch_schedule = 0;
- s->recalc_prefetch_done = 0;
- do {
- mode_lib->ms.support.PrefetchSupported = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
-
- s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
-
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
- CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
- CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
- CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
- CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- }
- }
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
-
- // By default, do not recalc prefetch schedule
- s->recalc_prefetch_schedule = 0;
-
- // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- s->line_times[k],
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 0;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- // Check urg peak bandwidth against available urg bw
-			// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
- CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
- CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
- CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
- CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
- CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
- CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
- CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
- CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
- if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
-
- CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
- ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
-
-				// if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
- CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
- CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
- mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
- s->recalc_prefetch_done = 1;
- s->recalc_prefetch_schedule = 1;
- }
-#endif
- } // prefetch schedule ok, do urg bw and flip schedule
- } while (s->recalc_prefetch_schedule);
-
- // Flip Schedule
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
-		} else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0;
- s->mSOCParameters.SMNLatency = 0;
- s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
- s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
- }
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
- // End of Prefetch Check
+ dml_core_ms_prefetch_check(mode_lib, display_cfg);
mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
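The hunk above collapses the long inline prefetch / urgent-bandwidth / flip-schedule / watermark checking (the removed block) into a single call to dml_core_ms_prefetch_check(). As a rough illustration of that refactoring pattern only — the struct, function, and variable names below are invented for this sketch and are not the kernel's DML API, whose real logic lives in the extracted helper elsewhere in the patch:

/*
 * Illustrative, self-contained analogue: a long per-plane support check
 * factored into a helper that fills a support struct, so the caller
 * shrinks to one line (as the hunk above does).  NOT the DML code.
 */
#include <stdbool.h>
#include <stdio.h>

struct support_info {
	bool prefetch_supported;
	bool iflip_supported;
};

/* Helper: run the per-plane bandwidth checks and record the result. */
static void check_prefetch(const double *urg_bw_required,
			   const double *urg_bw_available,
			   int num_planes, struct support_info *out)
{
	out->prefetch_supported = true;
	for (int k = 0; k < num_planes; k++)
		if (urg_bw_required[k] > urg_bw_available[k])
			out->prefetch_supported = false;
	/* If prefetch is not supported, immediate flip is assumed unsupported too. */
	out->iflip_supported = out->prefetch_supported;
}

int main(void)
{
	double req[2]   = { 100.0, 250.0 };
	double avail[2] = { 300.0, 300.0 };
	struct support_info s;

	check_prefetch(req, avail, 2, &s); /* caller stays one line, mirroring the patch */
	printf("prefetch=%d iflip=%d\n", s.prefetch_supported, s.iflip_supported);
	return 0;
}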
@@ -9404,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_vactive_det_fill_latency(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
mode_lib->ms.vactive_sw_bw_l,
@@ -9413,11 +9515,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
- mode_lib->ms.dram_change_vactive_det_fill_delay_us);
+ mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
+ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
@@ -9467,17 +9569,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMALLSize
&& mode_lib->ms.support.g6_temp_read_support
&& ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__);
mode_lib->ms.support.ModeSupport = true;
} else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__);
mode_lib->ms.support.ModeSupport = false;
}
}
	// Since mode_support now works on 1 particular power state, there is only 1 state idx (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
@@ -9493,8 +9595,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
#endif
}
@@ -9502,7 +9604,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.ModeSupport)
dml2_print_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__);
#endif
return mode_lib->ms.support.ModeSupport;
@@ -9512,18 +9614,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
{
unsigned int result;
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
result = dml_core_mode_support(in_out_params);
if (result)
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
@@ -9557,19 +9659,19 @@ static void CalculatePixelDeliveryTimes(
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
+ DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
@@ -9603,10 +9705,10 @@ static void CalculatePixelDeliveryTimes(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
@@ -9622,12 +9724,12 @@ static void CalculatePixelDeliveryTimes(
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
#endif
}
}
@@ -9723,14 +9825,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
#endif
}
@@ -9751,7 +9853,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
else
p->time_per_tdlut_group[k] = 0;
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
if (p->display_cfg->gpuvm_enable == true) {
if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -9767,14 +9869,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
@@ -9798,9 +9900,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
@@ -9815,17 +9917,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
p->time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
@@ -9861,18 +9963,18 @@ static void CalculateVMGroupAndRequestTimes(
double line_time;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (display_cfg->gpuvm_enable) {
@@ -9941,13 +10043,13 @@ static void CalculateVMGroupAndRequestTimes(
else
TimePerVMRequestFlip[k] = 0.0;
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip);
if (display_cfg->gpuvm_max_page_table_levels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
@@ -9964,10 +10066,10 @@ static void CalculateVMGroupAndRequestTimes(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
}
@@ -9983,7 +10085,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
unsigned int SingleVTotal = 0;
bool SameTiming = true;
bool FoundCriticalSurface = false;
- double LastZ8StutterPeriod = 0;
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
@@ -9997,9 +10098,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
@@ -10012,9 +10113,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
@@ -10030,19 +10131,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
#endif
if (l->AverageDCCZeroSizeFraction == 1) {
l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
@@ -10059,10 +10160,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
+ DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
#endif
} else {
l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
@@ -10070,16 +10171,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
+ DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
+ DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
#endif
*p->StutterPeriod = 0;
@@ -10090,15 +10191,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
#endif
if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
@@ -10118,17 +10219,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
#endif
}
}
@@ -10146,14 +10247,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
#endif
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
@@ -10162,10 +10263,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
/ math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
+ DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
#endif
l->TotalActiveWriteback = 0;
memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool));
@@ -10194,9 +10295,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (l->TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
+ DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
#endif
*p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
*p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
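/*
 * Editor's sketch, illustrative only and not part of this diff: the two
 * efficiency figures computed above reduce to "fraction of each stutter
 * period not spent exiting self-refresh or bursting data back in",
 * clamped at zero and expressed in percent.  The helper name below is
 * hypothetical.
 */
static double dml_stutter_efficiency_pct(double sr_exit_us, double burst_us,
					 double stutter_period_us)
{
	double efficiency = 1.0 - (sr_exit_us + burst_us) / stutter_period_us;

	return (efficiency > 0.0 ? efficiency : 0.0) * 100.0;
}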
@@ -10209,11 +10310,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
*p->Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
@@ -10228,7 +10329,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
+ //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
*p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
} else {
@@ -10240,25 +10341,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
- dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming);
- dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
+ DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming);
+ DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
+ DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
*p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
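/*
 * Editor's sketch, illustrative only and not part of this diff: the
 * DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE expression above is easier to read
 * after De Morgan - the cap mode stays set unless requests are bounded
 * and the single active surface is a single-plane, single-pipe surface.
 * The helper name is hypothetical.
 */
static bool dml_cstate_max_cap_mode(bool unbounded_requests, unsigned int num_surfaces,
				    bool single_plane_critical, bool single_pipe_critical)
{
	bool bounded_single_surface = !unbounded_requests && num_surfaces == 1 &&
				      single_plane_critical && single_pipe_critical;

	return !bounded_single_surface;
}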
@@ -10292,7 +10393,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
double max_uclk_mhz = 0;
double min_return_latency_in_DCFCLK_cycles = 0;
- dml2_printf("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch));
memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
@@ -10314,13 +10415,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- dml2_assert(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10346,57 +10447,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dppclk[k] > 0);
+ DML_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dcfclk > 0);
- dml2_assert(mode_lib->mp.FabricClock > 0);
- dml2_assert(mode_lib->mp.dram_bw_mbps > 0);
- dml2_assert(mode_lib->mp.uclk_freq_mhz > 0);
- dml2_assert(mode_lib->mp.GlobalDPPCLK > 0);
- dml2_assert(mode_lib->mp.Dispclk > 0);
- dml2_assert(mode_lib->mp.DCFCLKDeepSleep > 0);
- dml2_assert(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
+ DML_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML_ASSERT(s->SOCCLK > 0);
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
+ DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
+ }
+ DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
+ DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
+ DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
}
for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -10493,8 +10588,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -10759,11 +10854,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_mcache_setting_params->num_mcaches_l = &mode_lib->mp.num_mcaches_l[k];
calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->mp.mcache_row_bytes_l[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_l = &mode_lib->mp.mcache_row_bytes_per_channel_l[k];
calculate_mcache_setting_params->mcache_offsets_l = mode_lib->mp.mcache_offsets_l[k];
calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->mp.mcache_shift_granularity_l[k];
calculate_mcache_setting_params->num_mcaches_c = &mode_lib->mp.num_mcaches_c[k];
calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->mp.mcache_row_bytes_c[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_c = &mode_lib->mp.mcache_row_bytes_per_channel_c[k];
calculate_mcache_setting_params->mcache_offsets_c = mode_lib->mp.mcache_offsets_c[k];
calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->mp.mcache_shift_granularity_c[k];
@@ -10832,8 +10929,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
@@ -10912,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -10924,8 +11021,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_excess_vactive_bandwidth_required(
display_cfg,
s->num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->mp.excess_vactive_fill_bw_l,
mode_lib->mp.excess_vactive_fill_bw_c);
@@ -10971,7 +11068,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
@@ -11047,8 +11144,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.WritebackDelay[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
#endif
}
@@ -11057,7 +11154,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
if (s->num_active_planes > 1) {
@@ -11093,12 +11190,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
@@ -11135,7 +11232,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
#endif
CalculatePrefetchSchedule_params->display_cfg = display_cfg;
CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
@@ -11199,6 +11296,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11219,6 +11317,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->dummy_single[0];
mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
@@ -11228,7 +11327,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
} // for k
@@ -11238,9 +11337,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
+			DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 8)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
if (mode_lib->mp.dst_y_prefetch[k] < 2)
@@ -11249,24 +11348,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
- dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+			DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be > %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+			DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be > %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
}
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
mode_lib->mp.PrefetchModeSupported = false;
}
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
+ DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
// Prefetch schedule OK, now check prefetch bw
@@ -11294,24 +11393,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
&mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
#endif
}
@@ -11340,6 +11439,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11374,11 +11474,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.PrefetchModeSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
@@ -11404,12 +11504,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
#endif
}
for (k = 0; k < s->num_active_planes; ++k) {
@@ -11479,6 +11579,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -11501,13 +11602,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.ImmediateFlipSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->mp.ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
#endif
}
}
@@ -11520,28 +11621,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
}
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
} else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
// DCC Configuration
for (k = 0; k < s->num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
#endif
CalculateDCCConfiguration(
display_cfg->plane_descriptors[k].surface.dcc.enable,
@@ -11650,8 +11751,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
+ DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
+ DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
@@ -11763,15 +11864,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
@@ -11790,9 +11891,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
+ DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n");
s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
@@ -11806,23 +11907,23 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
//Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
+ mode_lib->mp.TotalWRBandwidth = 0;
for (k = 0; k < display_cfg->num_streams; ++k) {
s->WRBandwidth = 0;
if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
@@ -11831,7 +11932,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
(display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
/ ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
* (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
+ mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth;
}
}
@@ -11839,9 +11940,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -11921,28 +12022,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
+ DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__);
#endif
return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
}
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: result = %0d\n", __func__, result);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
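/*
 * Editor's sketch, illustrative only and not part of this diff: the
 * hysteresis programmed above is three quarters of the pixel chunk,
 * expressed in 64-byte returns, minus the worst-case return latency in
 * DCFCLK cycles, floored at 32 (and asserted to fit below 256).  The
 * helper name is hypothetical.
 */
static unsigned int dml_deep_sleep_hysteresis(double pixel_chunk_size_kbytes,
					      double min_return_latency_dcfclk_cycles)
{
	double cycles = pixel_chunk_size_kbytes * 1024.0 * 3.0 / 4.0 / 64.0
			- min_return_latency_dcfclk_cycles;

	return (unsigned int)(cycles > 32.0 ? cycles : 32.0);
}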
@@ -12000,16 +12101,16 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY;
unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
- dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
- dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
- dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
- dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
- dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
- dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
- dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
+ DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
+ DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
+ DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
+ DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+ DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
unsigned int dummy_integer[21];
@@ -12063,16 +12164,16 @@ void dml2_core_calcs_get_dpte_row_height(
CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
+ DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
#endif
}
static bool is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -12090,6 +12191,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
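/*
 * Editor's sketch, illustrative only and not part of this diff: every
 * watermark register above holds a refclk cycle count, so a watermark in
 * microseconds is scaled by the refclk frequency in MHz and truncated.
 * The helper name is hypothetical.
 */
static unsigned int dml_wm_us_to_refclk_cycles(double watermark_us, double refclk_freq_in_mhz)
{
	return (unsigned int)(watermark_us * refclk_freq_in_mhz);
}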
@@ -12116,11 +12219,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs
cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
+ DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
#endif
cursor_dlg_regs->chunk_hdl_adjust = 3;
@@ -12156,7 +12259,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
double stored_swath_c_bytes;
bool is_phantom_pipe;
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
@@ -12199,19 +12302,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
+ DML_ASSERT(p0_pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
#endif
if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
+ DML_ASSERT(p1_pte_row_height_linear >= 8);
}
rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
}
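The RQ register stores the linear dPTE row height as log2(height) - 3, so the minimum of 8 lines enforced by the DML_ASSERT above encodes as 0, 16 as 1, and so on. A small sketch of that encoding, assuming the height is a power of two (math_log2_approx is the driver's own helper; the function below is only illustrative):

static unsigned int encode_pte_row_height_linear(unsigned int row_height_lines)
{
	unsigned int log2_height = 0;

	/* row_height_lines is assumed to be a power of two and at least 8 */
	while ((1u << log2_height) < row_height_lines)
		log2_height++;

	return log2_height - 3;	/* 8 -> 0, 16 -> 1, 32 -> 2, ... */
}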
@@ -12245,12 +12348,12 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
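plane1_base_address above is expressed in 1 KiB units: chroma starts halfway into the detile buffer when the stored luma/chroma swath ratio is at most 1.5, otherwise luma keeps roughly two thirds (rounded to a 1 KiB multiple via dml_round_to_multiple). A worked example with an illustrative 96 KiB buffer:

unsigned int detile_buf_size_in_bytes = 96 * 1024;	/* 98304 bytes, illustrative */

/* ratio <= 1.5: half to chroma -> 98304 / 2 / 1024 = 48 */
unsigned int plane1_addr_half = detile_buf_size_in_bytes / 2 / 1024;

/* ratio > 1.5: 2/3 to luma -> (2 * 98304) / 3 = 65536, already a 1 KiB multiple -> 64 */
unsigned int plane1_addr_two_thirds = (2 * detile_buf_size_in_bytes / 3) / 1024;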
@@ -12258,15 +12361,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
+ DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ //DML_LOG_VERBOSE_rq_regs_st(rq_regs);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
static void rq_dlg_get_dlg_reg(
@@ -12281,10 +12384,10 @@ static void rq_dlg_get_dlg_reg(
memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- dml2_assert(l->plane_idx < DML2_MAX_PLANES);
+ DML_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -12314,18 +12417,18 @@ static void rq_dlg_get_dlg_reg(
l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
+ DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
+ DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
+ DML_ASSERT(l->refclk_freq_in_mhz != 0);
+ DML_ASSERT(l->pclk_freq_in_mhz != 0);
+ DML_ASSERT(l->ref_freq_to_pix_freq < 4.0);
// Need to figure out which side of odm combine we're in
// Assume the pipe instance under the same plane is in order
@@ -12354,14 +12457,14 @@ static void rq_dlg_get_dlg_reg(
l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane have contiguous indexing (i.e. plane 0 uses pipes 0 and 1, plane 1 uses pipes 2 and 3, etc.)
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
+ DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
}
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
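ref_freq_to_pix_freq and refcyc_per_htotal are written as fixed-point values with 19 and 8 fractional bits respectively; the DML_ASSERT above keeps the ratio below 4.0 so it fits the register's integer bits. A minimal sketch of the encoding (the clock values are illustrative only):

static unsigned int to_fixed_point(double value, unsigned int frac_bits)
{
	return (unsigned int)(value * (double)(1u << frac_bits));
}

/* e.g. refclk 400 MHz and pixel clock 594 MHz give a ratio of ~0.673;
 * with 19 fractional bits the register holds ~0.673 * 2^19 ~= 353000.
 */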
@@ -12370,20 +12473,20 @@ static void rq_dlg_get_dlg_reg(
l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
+ DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
@@ -12391,28 +12494,28 @@ static void rq_dlg_get_dlg_reg(
l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
+ DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
}
l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
// Active
l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
l->refcyc_per_line_delivery_pre_c = 0.0;
l->refcyc_per_line_delivery_c = 0.0;
@@ -12421,8 +12524,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
}
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12431,8 +12534,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
l->refcyc_per_req_delivery_pre_c = 0.0;
l->refcyc_per_req_delivery_c = 0.0;
@@ -12440,16 +12543,16 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
}
// TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
+ DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
// Assign to register structures
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
+ DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
@@ -12462,10 +12565,10 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12532,11 +12635,11 @@ static void rq_dlg_get_dlg_reg(
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
+ // CHECK for HW registers' range, DML_ASSERT or clamp
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
@@ -12550,16 +12653,16 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
}
if (l->dual_plane) {
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
}
}
@@ -12570,20 +12673,20 @@ static void rq_dlg_get_dlg_reg(
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
}
@@ -12606,11 +12709,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
+ DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
+ DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
#endif
}
@@ -12676,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
- const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index];
struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
@@ -12691,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* from display configuration */
base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
base_programming->otg_vline_time_ns =
- (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0);
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
- stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines);
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal;
/* from core */
base_programming->config.bits.min_ttu_vblank_usable = true;
@@ -12727,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vactive */
base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_vblank:
@@ -12739,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vblank */
base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_fw_drr:
/* drr */
base_programming->type = FAMS2_STREAM_TYPE_DRR;
sub_programming->drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines;
sub_programming->drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
sub_programming->drr.only_stretch_if_required = true;
@@ -12767,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
(uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
sub_programming->subvp.vratio_denominator = 1000;
sub_programming->subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines;
sub_programming->subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
sub_programming->subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal;
sub_programming->subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive;
sub_programming->subvp.config.bits.is_multi_planar =
plane_descriptor->surface.plane1.height > 0;
sub_programming->subvp.config.bits.is_yuv420 =
@@ -12782,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
plane_descriptor->pixel_format == dml2_420_12;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_reserved_hw:
@@ -12840,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
- out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]);
+ out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]);
}
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
@@ -12883,10 +12987,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
+ DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us);
#endif
}
@@ -12921,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
- out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported;
out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
@@ -12947,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
+ out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport;
out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
+ out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support;
+ out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support;
for (k = 0; k < out->display_config.num_planes; k++) {
@@ -12999,6 +13106,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4;
out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
@@ -13019,8 +13130,11 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.watermarks.temp_read_or_ppt_watermark_us = dml_get_wm_temp_read_or_ppt(mode_lib);
out->informative.mall.total_surface_size_in_mall_bytes = 0;
- for (k = 0; k < out->display_config.num_planes; ++k)
+ out->informative.dpp.total_num_dpps_required = 0;
+ for (k = 0; k < out->display_config.num_planes; ++k) {
out->informative.mall.total_surface_size_in_mall_bytes += mode_lib->mp.SurfaceSizeInTheMALL[k];
+ out->informative.dpp.total_num_dpps_required += mode_lib->mp.NoOfDPP[k];
+ }
out->informative.qos.min_return_latency_in_dcfclk = mode_lib->mp.min_return_latency_in_dcfclk;
out->informative.qos.urgent_latency_us = dml_get_urgent_latency(mode_lib);
@@ -13180,7 +13294,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
- out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index 27ef0e096b25..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..cc4f0663c6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,11 +10,13 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
+ out->project_id = project_id;
+
switch (project_id) {
case dml2_project_dcn4x_stage1:
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
index 411c514fe65c..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
index 4f54e54102ef..1087a8c926ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -36,7 +36,9 @@ struct dml2_core_ip_params {
unsigned int max_line_buffer_lines;
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int max_num_dpp;
+ unsigned int max_num_opp;
unsigned int max_num_otg;
+ unsigned int TDLUT_33cube_count;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
@@ -46,6 +48,7 @@ struct dml2_core_ip_params {
double max_vscl_ratio;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
+ unsigned int odm_combine_support_mask;
unsigned int num_dsc;
unsigned int maximum_dsc_bits_per_component;
unsigned int maximum_pixels_per_line_per_dsc_unit;
@@ -82,7 +85,6 @@ struct dml2_core_ip_params {
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
-
// MRQ
bool dcn_mrq_present;
unsigned int zero_size_buffer_entries;
@@ -102,6 +104,9 @@ struct dml2_core_internal_DmlPipe {
double DCFClkDeepSleep;
unsigned int DPPPerSurface;
bool ScalerEnabled;
+ bool UPSPEnabled;
+ unsigned int UPSPVTaps;
+ enum dml2_sample_positioning UPSPSamplePositioning;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -186,7 +191,9 @@ enum dml2_core_internal_output_type_rate {
dml2_core_internal_output_rate_hdmi_rate_6x4 = 9,
dml2_core_internal_output_rate_hdmi_rate_8x4 = 10,
dml2_core_internal_output_rate_hdmi_rate_10x4 = 11,
- dml2_core_internal_output_rate_hdmi_rate_12x4 = 12
+ dml2_core_internal_output_rate_hdmi_rate_12x4 = 12,
+ dml2_core_internal_output_rate_hdmi_rate_16x4 = 13,
+ dml2_core_internal_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_core_internal_watermarks {
@@ -198,6 +205,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -225,6 +234,7 @@ struct dml2_core_internal_mode_support_info {
bool MSOOrODMSplitWithNonDPLink;
bool NotEnoughLanesForMSO;
bool NumberOfOTGSupport;
+ bool NumberOfTDLUT33cubeSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
bool WritebackScaleRatioAndTapsSupport;
@@ -254,18 +264,23 @@ struct dml2_core_internal_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
+ bool global_dram_clock_change_support_required;
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
bool USRRetrainingSupport;
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
bool EnoughUrgentLatencyHidingSupport;
+ bool PrefetchScheduleSupported;
bool PrefetchSupported;
bool PrefetchBandwidthSupported;
bool DynamicMetadataSupported;
bool VRatioInPrefetchSupported;
bool DISPCLK_DPPCLK_Support;
bool TotalAvailablePipesSupport;
+ bool ODMSupport;
bool ModeSupport;
bool ViewportSizeSupport;
@@ -314,9 +329,7 @@ struct dml2_core_internal_mode_support_info {
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor
double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
double max_urgent_latency_us;
double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
@@ -326,9 +339,10 @@ struct dml2_core_internal_mode_support_info {
bool incorrect_imall_usage;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
+ bool dcfclk_support;
+ bool qos_bandwidth_support;
};
struct dml2_core_internal_mode_support {
@@ -350,9 +364,11 @@ struct dml2_core_internal_mode_support {
double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes
+ double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes
double uclk_freq_mhz;
double dram_bw_mbps;
double max_dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state
double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
@@ -394,9 +410,13 @@ struct dml2_core_internal_mode_support {
double TWait[DML2_MAX_PLANES];
bool UnboundedRequestEnabled;
+ unsigned int compbuf_reserved_space_64b;
+ bool hw_debug5;
unsigned int CompressedBufferSizeInkByte;
double VRatioPreY[DML2_MAX_PLANES];
double VRatioPreC[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int swath_width_luma_ub[DML2_MAX_PLANES];
unsigned int swath_width_chroma_ub[DML2_MAX_PLANES];
unsigned int RequiredSlots[DML2_MAX_PLANES];
@@ -417,8 +437,8 @@ struct dml2_core_internal_mode_support {
double dst_y_prefetch[DML2_MAX_PLANES];
double LinesForVM[DML2_MAX_PLANES];
double LinesForDPTERow[DML2_MAX_PLANES];
- double SwathWidthYSingleDPP[DML2_MAX_PLANES];
- double SwathWidthCSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
double BytePerPixelInDETY[DML2_MAX_PLANES];
@@ -469,13 +489,58 @@ struct dml2_core_internal_mode_support {
double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe
double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES];
+ bool is_using_mall_for_ss[DML2_MAX_PLANES];
+ unsigned int meta_row_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES];
+ bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
+ unsigned int meta_req_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES];
+ unsigned int meta_req_width[DML2_MAX_PLANES];
+ unsigned int meta_row_width[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeY[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeC[DML2_MAX_PLANES];
+ unsigned int meta_req_height[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_req_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES];
+ unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
+ unsigned int vm_group_bytes[DML2_MAX_PLANES];
+ unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
+ double TSetup[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
+ unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos.
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
+ double MaxActiveFCLKChangeLatencySupported;
+
// Backend
bool RequiresDSC[DML2_MAX_PLANES];
bool RequiresFEC[DML2_MAX_PLANES];
double OutputBpp[DML2_MAX_PLANES];
+ double DesiredOutputBpp[DML2_MAX_PLANES];
+ double PixelClockBackEnd[DML2_MAX_PLANES];
unsigned int DSCDelay[DML2_MAX_PLANES];
enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES];
enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES];
+ bool TotalAvailablePipesSupportNoDSC;
+ bool TotalAvailablePipesSupportDSC;
+ unsigned int NumberOfDPPNoDSC;
+ unsigned int NumberOfDPPDSC;
+ enum dml2_odm_mode ODMModeNoDSC;
+ enum dml2_odm_mode ODMModeDSC;
+ double RequiredDISPCLKPerSurfaceNoDSC;
+ double RequiredDISPCLKPerSurfaceDSC;
+ unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES];
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
@@ -484,6 +549,14 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
+ /* The max bandwidth calculated from the prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches.
+ * 1. oto bw should also be considered when calculating peak urgent bw, to avoid situations where oto/equ mismatches arise between ms and mp.
+ *
+ * 2. equ bandwidth needs to be considered when calculating peak urgent bw if the equ schedule is used in mode support.
+ * Slight differences in variables may cause the pixel data bandwidth to be higher,
+ * even though the overall equ prefetch bandwidths can be lower going from ms to mp.
+ */
+ double RequiredPrefetchBWMax[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
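Per the comment above, the peak urgent bandwidth used for support checks should take the larger of the pixel data bandwidth and the bandwidth implied by the prefetch schedule, so mode support (ms) and mode programming (mp) agree. A hedged sketch of that idea only; the names below are illustrative, not the driver's:

static double peak_prefetch_bw_mbps(double pixel_data_bw_mbps, double prefetch_schedule_bw_max_mbps)
{
	/* take whichever demand is larger so ms and mp compare against the same peak */
	return pixel_data_bw_mbps > prefetch_schedule_bw_max_mbps ?
		pixel_data_bw_mbps : prefetch_schedule_bw_max_mbps;
}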
@@ -500,6 +573,7 @@ struct dml2_core_internal_mode_support {
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
unsigned int NoOfDPP[DML2_MAX_PLANES];
+ unsigned int NoOfOPP[DML2_MAX_PLANES];
bool MPCCombine[DML2_MAX_PLANES];
double dcfclk_deepsleep;
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
@@ -510,6 +584,7 @@ struct dml2_core_internal_mode_support {
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
unsigned int TotalNumberOfActiveDPP;
+ unsigned int TotalNumberOfActiveOPP;
unsigned int TotalNumberOfSingleDPPSurfaces;
unsigned int TotalNumberOfDCCActiveDPP;
unsigned int Total3dlutActive;
@@ -518,15 +593,17 @@ struct dml2_core_internal_mode_support {
double VActiveLatencyHidingMargin[DML2_MAX_PLANES];
double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int MaxVStartupLines[DML2_MAX_PLANES];
- double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES];
+ double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_l[DML2_MAX_PLANES];
unsigned int mcache_offsets_l[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_l[DML2_MAX_PLANES];
unsigned int num_mcaches_c[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_c[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_c[DML2_MAX_PLANES];
unsigned int mcache_offsets_c[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_c[DML2_MAX_PLANES];
@@ -534,7 +611,44 @@ struct dml2_core_internal_mode_support {
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
+ unsigned int meta_row_height_luma[DML2_MAX_PLANES];
+ unsigned int meta_row_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+
+ unsigned int MaximumVStartup[DML2_MAX_PLANES];
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+
+ unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
+ unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
+
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -544,6 +658,7 @@ struct dml2_core_internal_mode_program {
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
//double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
@@ -595,6 +710,8 @@ struct dml2_core_internal_mode_program {
unsigned int MacroTileHeightC[DML2_MAX_PLANES];
unsigned int MacroTileWidthY[DML2_MAX_PLANES];
unsigned int MacroTileWidthC[DML2_MAX_PLANES];
+ double MaximumSwathWidthLuma[DML2_MAX_PLANES];
+ double MaximumSwathWidthChroma[DML2_MAX_PLANES];
bool surf_linear128_l[DML2_MAX_PLANES];
bool surf_linear128_c[DML2_MAX_PLANES];
@@ -627,6 +744,14 @@ struct dml2_core_internal_mode_program {
double UrgentBurstFactorChroma[DML2_MAX_PLANES];
double UrgentBurstFactorChromaPre[DML2_MAX_PLANES];
+ double MaximumSwathWidthInLineBufferLuma;
+ double MaximumSwathWidthInLineBufferChroma;
+
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
double meta_row_bw[DML2_MAX_PLANES];
unsigned int meta_row_bytes[DML2_MAX_PLANES];
unsigned int meta_req_width[DML2_MAX_PLANES];
@@ -648,7 +773,9 @@ struct dml2_core_internal_mode_program {
unsigned int PTERequestSizeC[DML2_MAX_PLANES];
double TWait[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
double Tdmdl_vm[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
double Tdmdl[DML2_MAX_PLANES];
double TSetup[DML2_MAX_PLANES];
unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
@@ -680,6 +807,39 @@ struct dml2_core_internal_mode_program {
double TCalc;
unsigned int TotImmediateFlipBytes;
+ unsigned int MaxTotalDETInKByte;
+ unsigned int NomDETInKByte;
+ unsigned int MinCompressedBufferSizeInKByte;
+ double PixelClockBackEnd[DML2_MAX_PLANES];
+ double OutputBpp[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+ unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ bool immediate_flip_required; // true if any pipe needs an immediate flip
+ double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
+ double TotalWRBandwidth;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
// -------------------
// Output
// -------------------
@@ -690,9 +850,12 @@ struct dml2_core_internal_mode_program {
// Support
bool UrgVactiveBandwidthSupport;
+ bool PrefetchScheduleSupported;
+ bool UrgentBandwidthSupport;
bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported
bool ImmediateFlipSupported;
bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
+ bool dcfclk_support;
// Clock
double Dcfclk;
@@ -726,6 +889,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -784,7 +950,7 @@ struct dml2_core_internal_mode_program {
// RQ registers
bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
-
+ double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES];
bool is_using_mall_for_ss[DML2_MAX_PLANES];
@@ -800,11 +966,12 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
@@ -839,11 +1006,13 @@ struct dml2_core_internal_mode_program {
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_l[DML2_MAX_PLANES];
unsigned int mcache_offsets_l[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_l[DML2_MAX_PLANES];
unsigned int num_mcaches_c[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_c[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_c[DML2_MAX_PLANES];
unsigned int mcache_offsets_c[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_c[DML2_MAX_PLANES];
@@ -863,6 +1032,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -958,6 +1129,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -966,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int cursor_bytes[DML2_MAX_PLANES];
bool stream_visited[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -979,6 +1151,7 @@ struct dml2_core_calcs_mode_support_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ double prefetch_swath_time_us[DML2_MAX_PLANES];
};
struct dml2_core_calcs_mode_programming_locals {
@@ -993,10 +1166,10 @@ struct dml2_core_calcs_mode_programming_locals {
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
- unsigned int dummy_integer_array[2][DML2_MAX_PLANES];
+ unsigned int dummy_integer_array[4][DML2_MAX_PLANES];
enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES];
double dummy_single_array[2][DML2_MAX_PLANES];
- unsigned int dummy_long_array[4][DML2_MAX_PLANES];
+ unsigned int dummy_long_array[8][DML2_MAX_PLANES];
bool dummy_boolean_array[2][DML2_MAX_PLANES];
bool dummy_boolean[2];
double dummy_single[2];
@@ -1020,7 +1193,6 @@ struct dml2_core_calcs_mode_programming_locals {
double dlg_vblank_start;
double LSetup;
double blank_lines_remaining;
- double TotalWRBandwidth;
double WRBandwidth;
struct dml2_core_internal_DmlPipe myPipe;
double PixelClockBackEndFactor;
@@ -1042,6 +1214,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -1057,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals {
double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1069,6 +1242,8 @@ struct dml2_core_calcs_mode_programming_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
@@ -1142,6 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1378,6 +1554,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
+ double flip_and_prefetch_bw_max;
double active_and_excess_bw;
};
@@ -1406,6 +1583,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals {
struct dml2_core_shared_rq_dlg_get_dlg_reg_locals {
unsigned int plane_idx;
+ unsigned int stream_idx;
enum dml2_source_format_class source_format;
const struct dml2_timing_cfg *timing;
bool dual_plane;
@@ -1561,15 +1739,17 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int *DSTYAfterScaler;
bool UnboundedRequestEnabled;
unsigned int CompressedBufferSizeInkByte;
- bool max_oustanding_when_urgent_expected;
+ bool max_outstanding_when_urgent_expected;
unsigned int max_outstanding_requests;
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
unsigned int *meta_row_height_c;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
struct dml2_core_internal_watermarks *Watermark;
enum dml2_pstate_change_support *DRAMClockChangeSupport;
+ bool *global_dram_clock_change_support_required;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
@@ -1580,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
- bool *temp_read_or_ppt_support;
+ enum dml2_pstate_change_support *temp_read_or_ppt_support;
+ bool *global_temp_read_or_ppt_supported;
};
-
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
const struct dml2_display_cfg *display_cfg;
unsigned int ConfigReturnBufferSizeInKByte;
@@ -1613,6 +1793,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
double *BytePerPixDETC;
unsigned int *DPPPerSurface;
bool mrq_present;
+ unsigned int dummy[2][DML2_MAX_PLANES];
+ unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES];
+ unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES];
// output
unsigned int *req_per_swath_ub_l;
@@ -1630,6 +1813,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
unsigned int *DETBufferSizeC;
unsigned int *full_swath_bytes_l;
unsigned int *full_swath_bytes_c;
+ unsigned int *full_swath_bytes_single_dpp_l;
+ unsigned int *full_swath_bytes_single_dpp_c;
bool *UnboundedRequestEnabled;
unsigned int *compbuf_reserved_space_64b;
unsigned int *CompressedBufferSizeInkByte;
@@ -1686,9 +1871,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1714,7 +1901,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
@@ -1789,6 +1979,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
+ double *RequiredPrefetchBWMax;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -1809,6 +2000,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int *VReadyOffsetPix;
double *prefetch_cursor_bw;
double *prefetch_sw_bytes;
+ double *prefetch_swath_time_us;
};
struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params {
@@ -1879,6 +2071,7 @@ struct dml2_core_calcs_calculate_mcache_row_bytes_params {
// output
unsigned int *num_mcaches;
unsigned int *mcache_row_bytes;
+ unsigned int *mcache_row_bytes_per_channel;
unsigned int *meta_row_width_ub;
double *dcc_dram_bw_nom_overhead_factor;
double *dcc_dram_bw_pref_overhead_factor;
@@ -1958,6 +2151,7 @@ struct dml2_core_calcs_calculate_mcache_setting_params {
// output
unsigned int *num_mcaches_l;
unsigned int *mcache_row_bytes_l;
+ unsigned int *mcache_row_bytes_per_channel_l;
unsigned int *mcache_offsets_l;
unsigned int *mcache_shift_granularity_l;
double *dcc_dram_bw_nom_overhead_factor_l;
@@ -1965,6 +2159,7 @@ struct dml2_core_calcs_calculate_mcache_setting_params {
unsigned int *num_mcaches_c;
unsigned int *mcache_row_bytes_c;
+ unsigned int *mcache_row_bytes_per_channel_c;
unsigned int *mcache_offsets_c;
unsigned int *mcache_shift_granularity_c;
double *dcc_dram_bw_nom_overhead_factor_c;
@@ -1993,6 +2188,7 @@ struct dml2_core_calcs_calculate_tdlut_setting_params {
unsigned int *tdlut_groups_per_2row_ub;
double *tdlut_opt_time;
double *tdlut_drain_time;
+ unsigned int *tdlut_bytes_to_deliver;
unsigned int *tdlut_bytes_per_group;
};
@@ -2020,6 +2216,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
+ double *prefetch_bandwidth_max;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
@@ -2056,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params
unsigned int *swath_width_c;
unsigned int *swath_height_l;
unsigned int *swath_height_c;
- double latency_to_hide_us;
+ double latency_to_hide_us[DML2_MAX_PLANES];
/* outputs */
unsigned int *bytes_required_l;
@@ -2124,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
+ enum dml2_project_id project_id;
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2136,8 +2334,8 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
+ enum dml2_project_id project_id;
struct dml2_display_cfg_programming *programming;
-
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index 1548dfc68b8e..b57d0f6ea6a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
val = 1;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -216,104 +216,106 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required))
+ DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
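
The fail_only gating pattern above prints each support flag either unconditionally or only when it sits at its failing value. A minimal standalone sketch of that gating, with an illustrative helper name and flag values:

#include <stdio.h>

/* Hedged sketch of the fail_only gating: a flag is logged when full
 * output is requested, or when it is at its failing value. */
static void log_support_flag(const char *name, int value,
			     int failing_value, int fail_only)
{
	if (!fail_only || value == failing_value)
		printf("DML: support: %s = %d\n", name, value);
}

int main(void)
{
	log_support_flag("ScaleRatioAndTapsSupport", 1, 0, 1); /* passing flag stays silent */
	log_support_flag("ViewportSizeSupport", 0, 0, 1);      /* failing flag is logged   */
	return 0;
}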
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
@@ -358,9 +360,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di
out_bpp[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
#endif
}
}
@@ -391,7 +393,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
#endif
return num_active_pipes;
}
@@ -452,7 +454,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
};
}
@@ -464,7 +466,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+ return sw_mode == dml2_sw_linear;
};
@@ -498,8 +500,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -511,7 +513,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -524,8 +526,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
#endif
return index;
}
@@ -533,31 +535,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
- dml2_assert(clk_entry_found);
+ if (!clk_entry_found)
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
+ ret_val = true;
return ret_val;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 8a78b9adfc62..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -212,7 +212,7 @@ static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double ma
clock_khz *= 1.0 + margin;
- divider = (unsigned int)(DFS_DIVIDER_RANGE_SCALE_FACTOR * (vco_freq_khz / clock_khz));
+ divider = (unsigned int)((int)DFS_DIVIDER_RANGE_SCALE_FACTOR * (vco_freq_khz / clock_khz));
/* we want to floor here to get higher clock than required rather than lower */
if (divider < DFS_DIVIDER_RANGE_2_START) {
@@ -307,8 +307,8 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
/* these clocks are optional, so they can fail to map, in which case map all to 0 */
if (result) {
if (!round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz, &state_table->dcfclk) ||
- !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
- !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = 0;
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = 0;
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = 0;
@@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
- if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
-
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
@@ -417,11 +414,11 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indicies
for (i = 0; i < display_config->num_streams; i++) {
@@ -456,10 +453,10 @@ static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *displa
static int find_smallest_idle_time_in_vblank_us(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int mask)
{
- unsigned char i;
+ unsigned int i;
int min_idle_us = 0;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
// Create a remap array to enable simple iteration through only masked stream indicies
@@ -754,6 +751,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -768,6 +767,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
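
The watermark programming above converts each DML watermark from microseconds into refclk cycles by multiplying by the refclk frequency in MHz (cycles per microsecond) and truncating. A small hedged sketch of that conversion with illustrative numbers:

#include <stdio.h>

/* Hedged sketch of the watermark conversion: microseconds times refclk
 * MHz gives refclk cycles, truncated to an integer register value. */
static unsigned int wm_us_to_refclk_cycles(double watermark_us,
					   double refclk_freq_in_mhz)
{
	return (unsigned int)(watermark_us * refclk_freq_in_mhz);
}

int main(void)
{
	/* e.g. a 10.0 us stutter-exit watermark at a 100 MHz refclk */
	printf("%u\n", wm_us_to_refclk_cycles(10.0, 100.0)); /* 1000 cycles */
	return 0;
}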
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index b165c58dfd11..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -11,6 +11,4 @@ bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_i
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
-bool dpmm_dcn4_unit_test(void);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 20ba2e446f1d..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index f4b1a7d02d42..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
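
The added max_ss_clocks_khz values derate each max clock by the DCN downspread percentage, so spread-spectrum modulation cannot push the effective clock above the hard cap. A hedged arithmetic sketch with illustrative values:

#include <stdio.h>

/* Hedged sketch of the downspread derating above: with a 0.5% downspread,
 * a 1200000 kHz cap becomes the largest SS-safe target clock. */
int main(void)
{
	unsigned int max_dispclk_khz = 1200000;   /* illustrative cap   */
	double dcn_downspread_percent = 0.5;      /* illustrative value */

	unsigned int max_ss_dispclk_khz =
		(unsigned int)((double)max_dispclk_khz /
			       (1.0 + dcn_downspread_percent / 100.0));

	printf("%u\n", max_ss_dispclk_khz); /* 1194029 kHz */
	return 0;
}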
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 02da6f45cbf7..f54fde8fba90 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif
+#endif
\ No newline at end of file
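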
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
index ad307deca3b0..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index a31db5742675..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con
static void remove_duplicates(double *list_a, int *list_a_size)
{
- int cur_element = 0;
- // For all elements b[i] in list_b[]
- while (cur_element < *list_a_size - 1) {
- if (list_a[cur_element] == list_a[cur_element + 1]) {
- for (int j = cur_element + 1; j < *list_a_size - 1; j++) {
- list_a[j] = list_a[j + 1];
- }
- *list_a_size = *list_a_size - 1;
- } else {
- cur_element++;
+ int j = 0;
+
+ if (*list_a_size == 0)
+ return;
+
+ for (int i = 1; i < *list_a_size; i++) {
+ if (list_a[j] != list_a[i]) {
+ j++;
+ list_a[j] = list_a[i];
}
}
+
+ *list_a_size = j + 1;
}
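
The rewritten remove_duplicates assumes the list is already sorted, so equal values sit next to each other, and compacts it in place with a read index i and a write index j. A self-contained sketch of the same two-pointer dedup on illustrative clock values:

#include <stdio.h>

/* In-place dedup of a sorted list of doubles, mirroring the compaction
 * above: j tracks the last unique slot, i scans ahead. */
static void dedup_sorted(double *list, int *size)
{
	int j = 0;

	if (*size == 0)
		return;

	for (int i = 1; i < *size; i++) {
		if (list[j] != list[i]) {
			j++;
			list[j] = list[i];
		}
	}

	*size = j + 1;
}

int main(void)
{
	double clks[] = { 400.0, 400.0, 600.0, 600.0, 600.0, 800.0 };
	int n = 6;

	dedup_sorted(clks, &n);
	for (int i = 0; i < n; i++)
		printf("%.1f\n", clks[i]); /* 400.0 600.0 800.0 */
	return 0;
}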
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
@@ -195,11 +196,11 @@ static int count_planes_with_stream_index(const struct dml2_display_cfg *display
static bool are_timings_trivially_synchronizable(struct display_configuation_with_meta *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indicies
for (i = 0; i < display_config->display_config.num_streams; i++) {
@@ -347,8 +348,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
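
The added check avoids dividing by zero when a stream has no ODM segments; the per-stream ODM load is otherwise pixel clock divided by the number of ODM segments used. A hedged sketch of the guarded selection with illustrative clocks and segment counts:

#include <stdio.h>

/* Hedged sketch of the guarded ODM-load pick: load is pixel clock per
 * ODM segment, and streams with zero segments count as zero load.
 * Values below are illustrative only. */
static int highest_odm_load_index(const unsigned long pixel_clock_khz[],
				  const unsigned int odms_used[], int num_streams)
{
	int highest_index = -1;
	long highest_load = -1;

	for (int i = 0; i < num_streams; i++) {
		long load = odms_used[i] > 0 ?
			(long)(pixel_clock_khz[i] / odms_used[i]) : 0;

		if (load > highest_load) {
			highest_load = load;
			highest_index = i;
		}
	}

	return highest_index;
}

int main(void)
{
	unsigned long pixel_clock_khz[] = { 594000, 1188000, 297000 };
	unsigned int odms_used[] = { 2, 2, 0 };

	printf("%d\n", highest_odm_load_index(pixel_clock_khz, odms_used, 3)); /* 1 */
	return 0;
}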
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index f00bd9e72a86..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 1efbc0329f85..c26e100fcaf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
+ unsigned int base_list_size = 0;
+ const struct dml2_pmo_pstate_strategy *base_list = NULL;
+ unsigned int *expanded_list_size = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_list = NULL;
+
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
@@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
pmo->options = in_out->options;
/* generate permutations of p-state configs from base strategy list */
- for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
- switch (i) {
+ for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
+ switch (i+1) {
case 1:
- DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_1_display,
- base_strategy_list_1_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_1_display;
+ base_list_size = base_strategy_list_1_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
+
break;
case 2:
- DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_2_display,
- base_strategy_list_2_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_2_display;
+ base_list_size = base_strategy_list_2_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
+
break;
case 3:
- DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_3_display,
- base_strategy_list_3_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_3_display;
+ base_list_size = base_strategy_list_3_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
+
break;
case 4:
- DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_4_display,
- base_strategy_list_4_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_4_display;
+ base_list_size = base_strategy_list_4_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
+
break;
}
+
+ DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+
+ /* populate list */
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_list,
+ base_list_size,
+ i + 1,
+ expanded_list,
+ expanded_list_size);
}
return true;
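
The refactor above collapses four near-identical per-display-count branches into one flow: pick the caller-supplied override strategy list when present, otherwise the built-in base list, then run a single common expansion call. A hedged sketch of that select-then-expand pattern; the types and values below are illustrative stand-ins, not the DML PMO API:

#include <stdio.h>
#include <stddef.h>

struct strategy { int id; };

static const struct strategy base_list_1_display[] = { {1}, {2} };
static const struct strategy base_list_2_display[] = { {3} };

int main(void)
{
	const struct strategy *builtin[2] = { base_list_1_display, base_list_2_display };
	const unsigned int builtin_size[2] = { 2, 1 };
	const struct strategy *override[2] = { NULL, NULL };  /* no overrides supplied */
	const unsigned int override_size[2] = { 0, 0 };

	for (int i = 0; i < 2; i++) {
		const struct strategy *base = builtin[i];
		unsigned int base_size = builtin_size[i];

		if (override[i] && override_size[i]) {
			base = override[i];
			base_size = override_size[i];
		}

		/* one common expansion call replaces the copied branches */
		printf("displays=%d: expanding %u base strategies\n",
		       i + 1, base_size);
	}
	return 0;
}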
@@ -813,8 +836,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
@@ -986,7 +1013,7 @@ static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
bool valid = true;
// Create a remap array to enable simple iteration through only masked stream indicies
@@ -1022,45 +1049,45 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
return synchronizable;
}
-static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
+static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
{
- return fams2_meta->contention_delay_otg_vlines +
- fams2_meta->method_subvp.programming_delay_otg_vlines +
- fams2_meta->method_subvp.phantom_vtotal +
- fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- fams2_meta->dram_clk_change_blackout_otg_vlines;
+ return pstate_meta->contention_delay_otg_vlines +
+ pstate_meta->method_subvp.programming_delay_otg_vlines +
+ pstate_meta->method_subvp.phantom_vtotal +
+ pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ pstate_meta->blackout_otg_vlines;
}
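
calc_svp_microschedule sums the contention delay, firmware programming delay, phantom vtotal, prefetch-to-MALL delay, and p-state blackout, all in OTG vlines; the SubVP checks later require this total to fit within the stream's vertical active. A hedged worked example with illustrative vline counts:

#include <stdio.h>

/* Hedged sketch with illustrative vline counts: the microschedule must
 * fit inside v_active for SubVP to be considered on that stream. */
int main(void)
{
	unsigned int contention_delay_vlines = 10;
	unsigned int programming_delay_vlines = 20;
	unsigned int phantom_vtotal = 500;
	unsigned int prefetch_to_mall_vlines = 30;
	unsigned int blackout_vlines = 60;
	unsigned int v_active = 2160;

	unsigned int microschedule_vlines = contention_delay_vlines +
		programming_delay_vlines + phantom_vtotal +
		prefetch_to_mall_vlines + blackout_vlines;

	printf("svp %s\n",
	       microschedule_vlines < v_active ? "possible" : "blocked"); /* possible */
	return 0;
}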
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
- if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
+ if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
return false;
}
/* check rr is within bounds */
- if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
+ if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
return false;
}
/* check required stretch is allowed */
if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
- stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
+ stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
return false;
}
}
@@ -1075,15 +1102,24 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
{
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_plane_parameters *plane_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
unsigned int microschedule_vlines;
- unsigned char i;
+ unsigned int i;
+ unsigned int mcaches_per_plane;
+ unsigned int total_mcaches_required = 0;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
/* confirm timing it is not a centered timing */
for (i = 0; i < display_config->display_config.num_planes; i++) {
plane_descriptor = &display_config->display_config.plane_descriptors[i];
+ mcaches_per_plane = 0;
+
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane += display_config->stage2.mcache_allocations[i].num_mcaches_plane0 +
+ display_config->stage2.mcache_allocations[i].num_mcaches_plane1 -
+ (display_config->stage2.mcache_allocations[i].last_slice_sharing.plane0_plane1 ? 1 : 0);
+ }
if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
num_planes_per_stream[plane_descriptor->stream_index]++;
@@ -1094,19 +1130,30 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
return false;
}
+
+ /* phantom requires same number of mcaches as main */
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane *= 2;
+ }
}
+ total_mcaches_required += mcaches_per_plane;
+ }
+
+ if (total_mcaches_required > pmo->soc_bb->num_dcc_mcaches) {
+ /* too many mcaches required */
+ return false;
}
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (stream_descriptor->overrides.disable_subvp) {
return false;
}
- microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
+ microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);
/* block if using an interlaced timing */
if (stream_descriptor->timing.interlaced) {
@@ -1117,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
* 2) refresh rate must be within the allowed bounds
*/
if (microschedule_vlines >= stream_descriptor->timing.v_active ||
- (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
+ (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
return false;
}
}
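
The mcache accounting added above totals, per DCC-enabled plane, the plane0 and plane1 mcaches minus any shared last slice, doubles that when a SubVP phantom copy is required, and rejects SubVP if the total exceeds the SoC mcache budget. A hedged arithmetic sketch with illustrative allocations:

#include <stdio.h>

/* Hedged sketch of the mcache budgeting: per-plane demand, doubled for
 * the phantom copy, checked against an illustrative SoC budget. */
int main(void)
{
	unsigned int num_mcaches_plane0 = 4;
	unsigned int num_mcaches_plane1 = 2;
	unsigned int last_slice_shared = 1;   /* plane0/plane1 share one */
	unsigned int num_dcc_mcaches = 16;    /* illustrative SoC budget */

	unsigned int per_plane = num_mcaches_plane0 + num_mcaches_plane1 -
				 (last_slice_shared ? 1 : 0);
	unsigned int with_phantom = per_plane * 2;

	printf("subvp %s\n",
	       with_phantom <= num_dcc_mcaches ? "fits" : "over budget"); /* fits */
	return 0;
}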
@@ -1194,7 +1241,7 @@ static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strate
static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
{
- unsigned char i;
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
@@ -1208,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta
}
static void build_method_scheduling_params(
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
- struct dml2_fams2_meta *stream_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
+ struct dml2_pstate_meta *stream_pstate_meta)
{
- stream_method_fams2_meta->allow_time_us =
- (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
- stream_fams2_meta->otg_vline_time_us;
- if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
+ stream_method_pstate_meta->allow_time_us =
+ (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
+ stream_pstate_meta->otg_vline_time_us;
+ if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
/* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this) */
- stream_method_fams2_meta->disallow_time_us = 0.0;
+ stream_method_pstate_meta->disallow_time_us = 0.0;
} else {
- stream_method_fams2_meta->disallow_time_us =
- stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
+ stream_method_pstate_meta->disallow_time_us =
+ stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
}
}
-static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
+static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
switch (stream_pstate_method) {
case dml2_pstate_method_vactive:
case dml2_pstate_method_fw_vactive_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
break;
case dml2_pstate_method_vblank:
case dml2_pstate_method_fw_vblank_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
break;
case dml2_pstate_method_fw_svp:
case dml2_pstate_method_fw_svp_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
break;
case dml2_pstate_method_fw_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
break;
case dml2_pstate_method_reserved_hw:
case dml2_pstate_method_reserved_fw:
@@ -1253,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
case dml2_pstate_method_count:
case dml2_pstate_method_na:
default:
- stream_method_fams2_meta = NULL;
+ stream_method_pstate_meta = NULL;
}
- return stream_method_fams2_meta;
+ return stream_method_pstate_meta;
}
static bool is_timing_group_schedulable(
@@ -1264,10 +1311,10 @@ static bool is_timing_group_schedulable(
const struct display_configuation_with_meta *display_cfg,
const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
- struct dml2_fams2_per_method_common_meta *group_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
unsigned int i;
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;
unsigned int base_stream_idx = 0;
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1281,31 +1328,31 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
+ if (!stream_method_pstate_meta)
return false;
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
- group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
+ group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
+ if (!stream_method_pstate_meta)
continue;
- if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
}
- if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
/* set group allow end to smaller otg vline */
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
}
/* check waveform still has positive width */
- if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
/* timing group is not schedulable */
return false;
}
@@ -1313,10 +1360,10 @@ static bool is_timing_group_schedulable(
}
/* calculate the rest of the meta */
- build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
+ build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);
- return group_fams2_meta->allow_time_us > 0.0 &&
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
+ return group_pstate_meta->allow_time_us > 0.0 &&
+ group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
static bool is_config_schedulable(
@@ -1330,7 +1377,7 @@ static bool is_config_schedulable(
double max_allow_delay_us = 0.0;
- memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
+ memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
/* search for a general solution to the schedule */
@@ -1345,12 +1392,12 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
}
- max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
+ max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
}
if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
@@ -1367,12 +1414,12 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
- double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
+ double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
+ double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
swapped = true;
}
}
@@ -1386,19 +1433,19 @@ static bool is_config_schedulable(
* other display, or when >2 streams continue to halve the remaining allow time.
*/
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
- if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
+ if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
/* this timing group always allows */
continue;
}
- double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
+ double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
/* stream can't overlap itself */
- if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
+ if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
max_allow_time_us = math_min2(
- s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
- (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
+ s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
+ (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);
if (max_allow_time_us < 0.0) {
/* failed exit early */
@@ -1426,12 +1473,12 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
- double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
+ double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
+ double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
- s->pmo_dcn4.sorted_group_gtl_period_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
swapped = true;
}
}
@@ -1446,7 +1493,7 @@ static bool is_config_schedulable(
unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
- if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
+ if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
@@ -1468,18 +1515,18 @@ static bool is_config_schedulable(
/* default period_0 > period_1 */
unsigned int lrg_idx = 0;
unsigned int sml_idx = 1;
- if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
+ if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
/* period_0 < period_1 */
lrg_idx = 1;
sml_idx = 0;
}
- period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
- shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
- max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
- max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
+ period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
+ shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
+ max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
+ max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;
if (shift_per_period > 0.0 &&
- shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
+ shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
schedulable = true;
}
@@ -1545,7 +1592,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
{
struct dml2_pmo_scratch *s = &pmo->scratch;
- unsigned char stream_index = 0;
+ unsigned int stream_index = 0;
unsigned int svp_count = 0;
unsigned int svp_stream_mask = 0;
@@ -1609,7 +1656,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
- unsigned char i;
+ unsigned int i;
int min_vactive_margin_us = 0xFFFFFFF;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1622,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta
return min_vactive_margin_us;
}
-static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
+static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
unsigned char i;
- unsigned int max_vactive_fill_us = 0;
+ int max_vactive_fill_us = 0;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
- max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
+ if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us)
+ max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
return max_vactive_fill_us;
}
-static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
+static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
@@ -1645,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
@@ -1653,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(display_config->display_config.num_streams - 1);
/* common */
- stream_fams2_meta->valid = true;
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
- stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
- stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->nom_vtotal * timing->h_total);
- stream_fams2_meta->nom_frame_time_us =
- (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
- stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
+ stream_pstate_meta->valid = true;
+ stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
+ stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
+ stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->nom_vtotal * timing->h_total);
+ stream_pstate_meta->nom_frame_time_us =
+ (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
+ stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
} else {
/* assume min of 48Hz */
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
}
} else {
- stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
- }
- stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->max_vtotal * timing->h_total);
- stream_fams2_meta->max_frame_time_us =
- (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
-
- stream_fams2_meta->scheduling_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->contention_delay_otg_vlines =
- (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
+ }
+ stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->max_vtotal * timing->h_total);
+ stream_pstate_meta->max_frame_time_us =
+ (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;
+
+ stream_pstate_meta->scheduling_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->contention_delay_otg_vlines =
+ (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
- stream_fams2_meta->allow_to_target_delay_otg_vlines =
- (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
- stream_fams2_meta->min_allow_width_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->allow_to_target_delay_otg_vlines =
+ (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
+ stream_pstate_meta->min_allow_width_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
/* this value should account for urgent latency */
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
+ stream_pstate_meta->blackout_otg_vlines =
(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
- stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->otg_vline_time_us);
/* scheduling params should be built based on the worst case for allow_time:disallow_time */
/* vactive */
if (display_config->display_config.num_streams == 1) {
/* for single stream, guarantee at least an instant of allow */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
- (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
+ (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
}
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;
- if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
- timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
+ timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
} else {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
}
- stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
+ stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
- stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
- stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
+ stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
+ stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);
/* subvp */
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.phantom_vactive =
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.phantom_vactive =
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
stream_info->phantom_min_v_active;
- stream_fams2_meta->method_subvp.phantom_vfp =
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.phantom_vfp =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive) */
- stream_fams2_meta->method_subvp.phantom_vtotal =
+ stream_pstate_meta->method_subvp.phantom_vtotal =
stream_info->phantom_v_startup +
- stream_fams2_meta->method_subvp.phantom_vfp +
+ stream_pstate_meta->method_subvp.phantom_vfp +
1 +
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vactive;
- stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
stream_descriptor->timing.v_blank_end +
- stream_fams2_meta->contention_delay_otg_vlines +
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vtotal +
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
+ stream_pstate_meta->contention_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vtotal +
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
+ stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);
/* drr */
- stream_fams2_meta->method_drr.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->vblank_start +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_drr.common.allow_start_otg_vline =
+ stream_pstate_meta->vblank_start +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal * 2 +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- }
- stream_fams2_meta->method_drr.common.allow_end_otg_vline =
- stream_fams2_meta->method_drr.stretched_vtotal -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal * 2 +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
+ }
+ stream_pstate_meta->method_drr.common.allow_end_otg_vline =
+ stream_pstate_meta->method_drr.stretched_vtotal -
+ stream_pstate_meta->blackout_otg_vlines;
+ build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
@@ -1796,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
int stream_index)
{
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
stream_svp_meta->valid = true;
/* PMO FAMS2 precalculates these values */
- stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
- stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
- stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
+ stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
+ stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
}
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
@@ -1817,7 +1864,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
unsigned int strategy_list_size = 0;
- unsigned char plane_index, stream_index, i;
+ unsigned int plane_index, stream_index, i;
bool build_override_strategy = true;
state->performed = true;
@@ -1855,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
- build_fams2_meta_per_stream(pmo, display_config, stream_index);
+ build_pstate_meta_per_stream(pmo, display_config, stream_index);
/* SVP meta */
build_subvp_meta_per_stream(pmo, display_config, stream_index);
@@ -1915,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
display_config->stage3.stream_svp_meta[stream_index].valid = false;
-
- display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
- display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
}
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1940,7 +1984,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1950,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
-
}
}
}
@@ -1961,7 +2004,7 @@ static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1984,7 +2027,7 @@ static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_me
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -2005,7 +2048,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -2016,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
-
}
}
}
@@ -2025,12 +2067,13 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
+
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
@@ -2042,7 +2085,7 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -2052,8 +2095,8 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2063,7 +2106,7 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -2073,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2120,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
/* copy FAMS2 meta */
if (success) {
display_config->stage3.fams2_required = fams2_required;
- memcpy(&display_config->stage3.stream_fams2_meta,
- &scratch->pmo_dcn4.stream_fams2_meta,
- sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
+ memcpy(&display_config->stage3.stream_pstate_meta,
+ &scratch->pmo_dcn4.stream_pstate_meta,
+ sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
}
return success;
@@ -2131,7 +2174,7 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
{
int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
+ unsigned int plane_index = 0;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
@@ -2164,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
return false;
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
- struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
- get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
+ get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 6baab7ad6ecc..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5f6dfc24df69..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -15,7 +15,6 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
{
switch (in_out->options.project_id) {
case dml2_project_dcn4x_stage1:
- return false;
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
return dml2_top_soc15_initialize_instance(in_out);
@@ -48,4 +47,3 @@ bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *
return in_out->dml2_instance->funcs.build_mcache_programming(in_out);
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
new file mode 100644
index 000000000000..5e14d85821e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_legacy.h"
+#include "dml2_top_soc15.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "display_mode_core_structs.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index b39029c0e56f..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
@@ -545,6 +545,7 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
if (odm_combine_factor > 1) {
max_per_pipe_vp_p0 = plane->surface.plane0.width;
temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
+
if (temp < max_per_pipe_vp_p0)
max_per_pipe_vp_p0 = temp;
@@ -760,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache
total_mcaches_required--;
}
}
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+ DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
result = false;
@@ -830,7 +831,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
bool uclk_pstate_success = false;
bool vmin_success = false;
bool stutter_success = false;
- unsigned int i;
memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
@@ -976,13 +976,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
}
/*
- * Populate mcache programming
- */
- for (i = 0; i < in_out->display_config->num_planes; i++) {
- in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
- }
-
- /*
* Call DPMM to map all requirements to minimum clock state
*/
if (result) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 6fda201af898..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
#ifndef __DML2_TOP_SOC15_H__
#define __DML2_TOP_SOC15_H__
#include "dml2_internal_shared_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
new file mode 100644
index 000000000000..611c80f4f1bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_DEBUG_H__
+#define __DML2_DEBUG_H__
+
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
+
+/* private helper macros */
+#define _BOOL_FORMAT(field) "%s", field ? "true" : "false"
+#define _UINT_FORMAT(field) "%u", field
+#define _INT_FORMAT(field) "%d", field
+#define _DOUBLE_FORMAT(field) "%lf", field
+#define _ELEMENT_FUNC "function"
+#define _ELEMENT_COMP_IF "component_interface"
+#define _ELEMENT_TOP_IF "top_interface"
+#define _LOG_ENTRY(element) do { \
+ DML_LOG_INTERNAL("<"element" name=\""); \
+ DML_LOG_INTERNAL(__func__); \
+ DML_LOG_INTERNAL("\">\n"); \
+} while (0)
+#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n")
+#define _LOG_SCALAR(field, format) do { \
+ DML_LOG_INTERNAL(#field" = "format(field)); \
+ DML_LOG_INTERNAL("\n"); \
+} while (0)
+#define _LOG_ARRAY(field, size, format) do { \
+ DML_LOG_INTERNAL(#field " = ["); \
+ for (int _i = 0; _i < (int) size; _i++) { \
+ DML_LOG_INTERNAL(format(field[_i])); \
+ if (_i + 1 == (int) size) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+}} while (0)
+#define _LOG_2D_ARRAY(field, size0, size1, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j])); \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL("["); \
+ for (int _k = 0; _k < (int) size2; _k++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j][_k])); \
+ if (_k + 1 == (int) size2) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* tracing of DML internal executions */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
+
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+/* public macros for DML_LOG_LEVEL_FATAL and up */
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_ERROR and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
+#else
+#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_WARN and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_WARN(fmt, ...) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_INFO and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
+#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF)
+#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF)
+#else
+#define DML_LOG_INFO(fmt, ...) ((void)0)
+#define DML_LOG_TOP_IF_ENTER() ((void)0)
+#define DML_LOG_TOP_IF_EXIT() ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_DEBUG and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF)
+#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF)
+#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC)
+#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC)
+#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT)
+#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT)
+#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT)
+#else
+#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#define DML_LOG_COMP_IF_ENTER() ((void)0)
+#define DML_LOG_COMP_IF_EXIT() ((void)0)
+#define DML_LOG_FUNC_ENTER() ((void)0)
+#define DML_LOG_FUNC_EXIT() ((void)0)
+#define DML_LOG_DEBUG_BOOL(field) ((void)0)
+#define DML_LOG_DEBUG_UINT(field) ((void)0)
+#define DML_LOG_DEBUG_INT(field) ((void)0)
+#define DML_LOG_DEBUG_DOUBLE(field) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_VERBOSE */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
+#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */
+#endif /* __DML2_DEBUG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d94b310d6eec..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table {
} max_clocks_khz;
struct {
+ unsigned int dispclk;
+ unsigned int dppclk;
+ unsigned int dtbclk;
+ } max_ss_clocks_khz;
+
+ struct {
unsigned int dprefclk;
unsigned int xtalclk;
unsigned int pcierefclk;
@@ -64,7 +70,6 @@ struct dml2_mcg_build_min_clock_table_params_in_out {
};
struct dml2_mcg_instance {
bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
- bool (*unit_test)(void);
};
/*
@@ -110,7 +115,6 @@ struct dml2_dpmm_scratch {
struct dml2_dpmm_instance {
bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
- bool (*unit_test)(void);
struct dml2_dpmm_scratch dpmm_scratch;
};
@@ -148,7 +152,7 @@ struct core_plane_support_info {
int active_latency_hiding_us;
int mall_svp_size_requirement_ways;
int nominal_vblank_pstate_latency_hiding_us;
- unsigned int dram_change_vactive_det_fill_delay_us;
+ int vactive_det_fill_delay_us[dml2_pstate_type_count];
};
struct core_stream_support_info {
@@ -198,11 +202,14 @@ struct dml2_core_mode_support_result {
} active;
unsigned int dispclk_khz;
+ unsigned int dpprefclk_khz;
+ unsigned int dtbrefclk_khz;
unsigned int dcfclk_deepsleep_khz;
unsigned int socclk_khz;
unsigned int uclk_pstate_supported;
unsigned int fclk_pstate_supported;
+ struct dml2_core_internal_watermarks watermarks;
} global;
struct {
@@ -249,56 +256,70 @@ struct dml2_implicit_svp_meta {
unsigned long v_front_porch;
};
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
/* generic params */
- unsigned int allow_start_otg_vline;
- unsigned int allow_end_otg_vline;
+ int allow_start_otg_vline;
+ int allow_end_otg_vline;
/* scheduling params */
double allow_time_us;
double disallow_time_us;
double period_us;
};
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
bool valid;
double otg_vline_time_us;
- unsigned int scheduling_delay_otg_vlines;
- unsigned int vertical_interrupt_ack_delay_otg_vlines;
- unsigned int allow_to_target_delay_otg_vlines;
- unsigned int contention_delay_otg_vlines;
- unsigned int min_allow_width_otg_vlines;
- unsigned int nom_vtotal;
- unsigned int vblank_start;
+ int scheduling_delay_otg_vlines;
+ int vertical_interrupt_ack_delay_otg_vlines;
+ int allow_to_target_delay_otg_vlines;
+ int contention_delay_otg_vlines;
+ int min_allow_width_otg_vlines;
+ int nom_vtotal;
+ int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
- unsigned int max_vtotal;
+ int max_vtotal;
double min_refresh_rate_hz;
double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ int blackout_otg_vlines;
+ int max_allow_delay_otg_vlines;
+ double nom_vblank_time_us;
struct {
double max_vactive_det_fill_delay_us;
- unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ double vactive_latency_hiding_us;
+ double reserved_vblank_required_us;
+ int max_vactive_det_fill_delay_otg_vlines;
+ int reserved_blank_required_vlines;
+ struct dml2_pstate_per_method_common_meta common;
} method_vactive;
struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vblank;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int df_throttle_delay_otg_vlines;
- unsigned int prefetch_to_mall_delay_otg_vlines;
+ int programming_delay_otg_vlines;
+ int df_throttle_delay_otg_vlines;
+ int prefetch_to_mall_delay_otg_vlines;
unsigned long phantom_vactive;
unsigned long phantom_vfp;
unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_subvp;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ int programming_delay_otg_vlines;
+ int stretched_vtotal;
+ struct dml2_pstate_per_method_common_meta common;
} method_drr;
};
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
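The new dml2_pmo_synchronized_timing_groups struct only declares storage; nothing in this hunk shows how the per-group stream masks get filled. A minimal sketch of one plausible way to populate such a mask, assuming the grouping criterion is "streams whose timings can stay in lockstep" (an assumption, not stated by the patch):

#define MAX_STREAMS_SKETCH 8

struct timing_groups_sketch {
	unsigned int num_groups;
	unsigned int group_mask[MAX_STREAMS_SKETCH];	/* bit i set => stream i belongs to the group */
};

static void add_stream_to_group(struct timing_groups_sketch *g,
				unsigned int group, unsigned int stream)
{
	if (group >= g->num_groups)
		g->num_groups = group + 1;	/* groups are allocated densely from 0 */
	g->group_mask[group] |= 1u << stream;
}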
struct dml2_optimization_stage3_state {
bool performed;
bool success;
@@ -313,7 +334,7 @@ struct dml2_optimization_stage3_state {
// Meta-data for FAMS2
bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
int min_clk_index_for_latency;
};
@@ -357,8 +378,6 @@ struct dml2_pmo_pstate_strategy {
enum dml2_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
bool allow_state_increase;
};
-
-
struct dml2_core_mode_support_in_out {
/*
* Inputs
@@ -444,13 +463,17 @@ struct dml2_core_internal_state_intermediates {
};
struct dml2_core_mode_support_locals {
- struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ union {
+ struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
- struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ union {
+ struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
};
@@ -464,6 +487,7 @@ struct dml2_core_scratch {
};
struct dml2_core_instance {
+ enum dml2_project_id project_id;
struct dml2_mcg_min_clock_table *minimum_clock_table;
struct dml2_core_internal_state_inputs inputs;
struct dml2_core_internal_state_intermediates intermediates;
@@ -475,7 +499,6 @@ struct dml2_core_instance {
bool (*mode_programming)(struct dml2_core_mode_programming_in_out *in_out);
bool (*populate_informative)(struct dml2_core_populate_informative_in_out *in_out);
bool (*calculate_mcache_allocation)(struct dml2_calculate_mcache_allocation_in_out *in_out);
- bool (*unit_test)(void);
struct {
struct dml2_core_internal_display_mode_lib mode_lib;
@@ -612,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
struct dml2_pmo_scratch {
union {
struct {
@@ -641,7 +670,7 @@ struct dml2_pmo_scratch {
// Stores all the implicit SVP meta information indexed by stream index of the display
// configuration under inspection, built at optimization stage init
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
unsigned int num_stutter_candidates;
@@ -656,7 +685,7 @@ struct dml2_pmo_scratch {
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
double group_phase_offset[DML2_MAX_PLANES];
@@ -723,8 +752,6 @@ struct dml2_pmo_instance {
bool (*test_for_stutter)(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool (*optimize_for_stutter)(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
- bool (*unit_test)(void);
-
struct dml2_pmo_init_data init_data;
struct dml2_pmo_scratch scratch;
};
@@ -949,7 +976,6 @@ struct dml2_top_funcs {
bool (*check_mode_supported)(struct dml2_check_mode_supported_in_out *in_out);
bool (*build_mode_programming)(struct dml2_build_mode_programming_in_out *in_out);
bool (*build_mcache_programming)(struct dml2_build_mcache_programming_in_out *in_out);
- bool (*unit_test)(void);
};
struct dml2_instance {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 1ed21c1b86a5..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
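The sort above now uses the kernel's swap() helper rather than an open-coded three-assignment exchange, which is why the local temp variable disappears from sort_pipes_for_splitting(). A minimal userspace sketch of the same idiom, with a local macro standing in for the kernel's swap():

#include <stdio.h>

/* sketch only: the kernel provides its own swap(); this local macro mimics it */
#define swap(a, b) do { typeof(a) _tmp = (a); (a) = (b); (b) = _tmp; } while (0)

static void sort_ascending(unsigned int *v, unsigned int n)
{
	int sorted = 0;

	while (!sorted) {
		sorted = 1;
		for (unsigned int i = 0; i + 1 < n; i++) {
			if (v[i] > v[i + 1]) {
				swap(v[i], v[i + 1]);	/* no temp variable leaks into the caller */
				sorted = 0;
			}
		}
	}
}

int main(void)
{
	unsigned int pipes[] = { 3, 1, 2, 0 };

	sort_ascending(pipes, 4);
	for (int i = 0; i < 4; i++)
		printf("%u ", pipes[i]);	/* prints: 0 1 2 3 */
	printf("\n");
	return 0;
}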
@@ -532,26 +530,6 @@ static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned
odm_slice_end_x[odm_factor - 1] = stream->src.width - 1;
}
-static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices)
-{
- unsigned int slice_start_x, slice_end_x;
-
- if (slice_index == 0)
- slice_start_x = 0;
- else
- slice_start_x = odm_slice_end_x[slice_index - 1] + 1;
-
- slice_end_x = odm_slice_end_x[slice_index];
-
- if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x)
- return false;
-
- if (plane->clip_rect.x > slice_end_x)
- return false;
-
- return true;
-}
-
static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
struct dc_state *state,
struct dc_pipe_mapping_scratch *scratch,
@@ -791,12 +769,6 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
sort_pipes_for_splitting(&scratch->pipe_pool);
for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
- // We build the tree for one ODM slice at a time.
- // Each ODM slice shares a common OPP
- if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) {
- continue;
- }
-
// Now we have a list of all pipes to be used for this plane/stream, now setup the tree.
scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state,
plane,
@@ -1108,22 +1080,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
if (stream_disp_cfg_index >= disp_cfg_index_max)
continue;
- if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
- scratch.odm_info.odm_factor = 1;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
- scratch.odm_info.odm_factor = 2;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
- scratch.odm_info.odm_factor = 4;
- } else {
- ASSERT(false);
- scratch.odm_info.odm_factor = 1;
- }
-
+ if (ctx->architecture == dml2_architecture_20) {
+ if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ } else if (ctx->architecture == dml2_architecture_21) {
/* After the DML2.1 update, ODM interpretation changes and is no longer the same as for DML2.0.
* This is not an issue with the new resource management logic. This block ensures backcompat
* with legacy pipe management when using the updated DML.
* */
- if (ctx->architecture == dml2_architecture_21) {
if (ODMMode[stream_disp_cfg_index] == 1) {
scratch.odm_info.odm_factor = 1;
} else if (ODMMode[stream_disp_cfg_index] == 2) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
index 1538b708d8be..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
index 7ca7f2a743c2..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
index 140ec01545db..55b3e3ca54f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
@@ -23,7 +23,7 @@
* Authors: AMD
*
*/
-
+
#ifndef __DML2_INTERNAL_TYPES_H__
#define __DML2_INTERNAL_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index 3d29169dd6bb..66040c877d68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -24,6 +24,7 @@
*
*/
+
#include "dml2_dc_types.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -654,14 +655,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find DML pipe index (pipe_idx) using dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +670,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
@@ -813,7 +812,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
@@ -868,7 +867,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
index 9d64851f54e7..9d64851f54e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
index c4c52173ef22..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
@@ -301,6 +301,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
policy->AssumeModeSupportAtMaxPwrStateEvenDRAMClockChangeNotSupported = true; // TOREVIEW: What does this mean?
policy->AssumeModeSupportAtMaxPwrStateEvenFClockChangeNotSupported = true; // TOREVIEW: What does this mean?
if (project == dml_project_dcn35 ||
+ project == dml_project_dcn36 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
index e83e05248592..e83e05248592 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index b416320873e1..d834cb595afa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -107,6 +107,7 @@ void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, stru
case dml_project_dcn35:
case dml_project_dcn351:
+ case dml_project_dcn36:
out->rob_buffer_size_kbytes = 64;
out->config_return_buffer_size_in_kbytes = 1792;
out->compressed_buffer_segment_size_in_kbytes = 64;
@@ -292,6 +293,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
case dml_project_dcn35:
case dml_project_dcn351:
+ case dml_project_dcn36:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
@@ -299,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
+
case dml_project_dcn401:
out->pct_ideal_fabric_bw_after_urgent = 76; //67;
out->max_avg_sdp_bw_use_normal_percent = 75; //80;
@@ -422,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+
+
case dml_project_dcn401:
p->in_states->num_states = 2;
transactions_per_mem_clock = 16;
@@ -506,6 +511,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->dcfclk_stas_mhz[3] = 1324;
p->dcfclk_stas_mhz[4] = p->in_states->state_array[1].dcfclk_mhz;
} else if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
+ dml2->v20.dml_core_ctx.project != dml_project_dcn36 &&
dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
p->dcfclk_stas_mhz[0] = 300;
p->dcfclk_stas_mhz[1] = 615;
@@ -554,6 +560,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn36 ||
dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
@@ -786,7 +793,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -892,7 +899,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig
out->SurfaceWidthC[location] = in->timing.h_addressable;
out->SurfaceHeightC[location] = in->timing.v_addressable;
out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128;
- out->PitchC[location] = 0;
+ out->PitchC[location] = 1;
out->DCCEnable[location] = false;
out->DCCMetaPitchY[location] = 0;
out->DCCMetaPitchC[location] = 0;
@@ -949,6 +956,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;
@@ -969,7 +977,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+ const struct dc_plane_state *in,
+ struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -990,7 +1000,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
- memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+ return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1053,11 +1063,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
- struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
- if (!scaler_data)
- return;
-
- get_scaler_data_for_plane(in, context, scaler_data);
+ struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1122,8 +1128,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-
- kfree(scaler_data);
}
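get_scaler_data_for_plane() now returns a pointer to the scaler data already computed into the resource context's temp pipe, so populate_dml_plane_cfg_from_plane_state() no longer needs the kzalloc/memcpy/kfree round trip. A small sketch of the shape of that change with made-up types; it is only safe because the caller reads the data without modifying it and the context outlives the call:

struct scaler_data_sketch { int h_taps, v_taps; };
struct ctx_sketch { struct scaler_data_sketch temp_pipe_scl; };

/* before: allocate a private copy, memcpy into it, and free it after use */
/* after:  borrow the copy the context already owns for the duration of the call */
static const struct scaler_data_sketch *get_scaler_data_sketch(struct ctx_sketch *ctx)
{
	return &ctx->temp_pipe_scl;
}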
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
@@ -1188,22 +1192,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
return location;
}
-static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg)
-{
- int i;
-
- if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- ASSERT(state->stream_count == 1);
- dml_dispcfg->timing.DRRDisplay[0] = true;
- } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
-
- for (i = 0; i < dml_dispcfg->num_timings; i++) {
- if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id)
- dml_dispcfg->timing.DRRDisplay[i] = true;
- }
- }
-}
-
static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
{
unsigned int i;
@@ -1343,7 +1331,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
@@ -1383,7 +1371,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(
@@ -1436,9 +1424,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
}
}
-
- if (!dml2->config.use_native_pstate_optimization)
- apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg);
}
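The two ASSERT changes above tighten an off-by-one: disp_cfg_stream_location and disp_cfg_plane_location index arrays sized __DML2_WRAPPER_MAX_STREAMS_PLANES__, so the last valid index is one less than the size and the guard must use "<", not "<=". A tiny sketch of the corrected bound check, with MAX_ENTRIES standing in for the real constant:

#include <assert.h>

#define MAX_ENTRIES 6

static void record_location(unsigned int *table, int idx)
{
	/* idx == MAX_ENTRIES would write one element past the end of the array */
	assert(idx >= 0 && idx < MAX_ENTRIES);
	table[idx] = 1;
}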
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
index d764773938f4..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
index 04fcfe637119..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 340791d40ecb..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -33,7 +33,6 @@
#include "dml2_dc_resource_mgmt.h"
#include "dml21_wrapper.h"
-
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
if (dml2->config.use_native_soc_bb_construction)
@@ -72,6 +71,7 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
+ dml2->v20.dml_core_ctx.project != dml_project_dcn36 &&
dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
/*dGPU default as 50Mhz*/
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
@@ -93,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2,
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
const struct dml_display_cfg_st *display_cfg,
- struct dml_mode_support_info_st *evaluation_info)
+ struct dml_mode_support_info_st *evaluation_info,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
s->mode_support_params.in_display_cfg = display_cfg;
+ if (validate_mode == DC_VALIDATE_MODE_ONLY)
+ s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
+ else
+ s->mode_support_params.in_start_state_idx = 0;
s->mode_support_params.out_evaluation_info = evaluation_info;
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
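pack_and_call_dml_mode_support_ex() now seeds the state sweep from validate_mode: a DC_VALIDATE_MODE_ONLY check starts at the last state, while full validation still starts from state 0 so the lowest supported state can be reported. A sketch of that selection, assuming the state array is ordered from lowest to highest clocks (so the last entry is the most capable one):

enum validate_mode_sketch { MODE_ONLY, MODE_AND_STATE_INDEX, MODE_AND_PROGRAMMING };

static unsigned int pick_start_state(enum validate_mode_sketch mode, unsigned int num_states)
{
	/* mode-only: if the top state works, the mode is supported - no need to sweep */
	return (mode == MODE_ONLY) ? num_states - 1 : 0;
}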
@@ -110,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
int unused_dpps = p->ip_params->max_num_dpp;
- int i, j;
- int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
- int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
- float frame_time_sec, max_frame_time_sec;
+ int i;
+ int odms_needed;
int largest_blend_and_timing = 0;
bool optimization_done = false;
@@ -128,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
if (p->new_display_config != p->cur_display_config)
*p->new_display_config = *p->cur_display_config;
- // Optimize P-State Support
- if (dml2->config.use_native_pstate_optimization) {
- if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
- // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp
- subvp_timing_to_add = -1;
- subvp_surface_to_add = -1;
- max_frame_time_sec = 0;
- surface_count = 0;
- for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
- refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
- (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
- if (refresh_rate_hz < 120) {
- // Check its upstream surfaces to see if this one could be converted to subvp.
- dpps_needed = 0;
- for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
- if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
- p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
- dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
- subvp_surface_to_add = j;
- surface_count++;
- }
- }
-
- if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
- frame_time_sec = (float)1 / refresh_rate_hz;
- if (frame_time_sec > max_frame_time_sec) {
- max_frame_time_sec = frame_time_sec;
- subvp_timing_to_add = i;
- }
- }
- }
- }
- if (subvp_timing_to_add >= 0) {
- new_timing_index = p->new_display_config->num_timings++;
- new_surface_index = p->new_display_config->num_surfaces++;
- // Add a phantom pipe reflecting the main pipe's timing
- dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
-
- pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
- p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
- (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
- (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
-
- subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
-
- p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
- p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
- p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
-
- p->new_display_config->output.OutputDisabled[new_timing_index] = true;
-
- p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
-
- dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
- dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
-
- p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
-
- p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
- p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
-
- p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
-
- p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
-
- p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
-
- optimization_done = true;
- }
- }
- }
// Optimize Clocks
if (!optimization_done) {
@@ -224,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
return optimization_done;
}
-static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
+static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
@@ -266,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
}
- dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
+ dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
+ validate_mode);
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
@@ -331,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const
}
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
- struct dc_state *display_state)
+ struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
unsigned int result = 0, i;
@@ -367,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
@@ -388,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
dml2->v20.dml_core_ctx.policy = s->new_policy;
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
&s->new_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (optimized_result)
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
@@ -407,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
if (!optimized_result) {
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
}
}
@@ -417,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
return result;
}
-static int find_drr_eligible_stream(struct dc_state *display_state)
-{
- int i;
-
- for (i = 0; i < display_state->stream_count; i++) {
- if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
- && display_state->streams[i]->ignore_msa_timing_param) {
- // Use ignore_msa_timing_param flag to identify as DRR
- return i;
- }
- }
-
- return -1;
-}
-
-static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
-{
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- bool pstate_optimization_done = false;
- bool pstate_optimization_success = false;
- bool result = false;
- int drr_display_index = 0, non_svp_streams = 0;
- bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
-
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
-
- result = dml_mode_support_wrapper(dml2, display_state);
-
- if (!result) {
- pstate_optimization_done = true;
- } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- }
-
- if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
-
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- non_svp_streams = display_state->stream_count;
-
- while (!pstate_optimization_done) {
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
-
- // Always try adding SVP first
- if (result)
- result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
- else
- pstate_optimization_done = true;
-
-
- if (result) {
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- pstate_optimization_done = true;
- }
-
- if (result) {
- non_svp_streams--;
-
- if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- } else {
- drr_display_index = find_drr_eligible_stream(display_state);
-
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_svp_streams == 1 && drr_display_index >= 0) {
- if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- }
-
- if (pstate_optimization_success) {
- pstate_optimization_done = true;
- } else {
- pstate_optimization_done = false;
- }
- }
- }
- }
- }
-
- if (!pstate_optimization_success) {
- dml2_svp_remove_all_phantom_pipes(dml2, display_state);
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- return result;
-}
-
-static bool call_dml_mode_support_and_programming(struct dc_state *context)
+static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
unsigned int result = 0;
unsigned int min_state = 0;
@@ -542,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
if (!context->streams[0]->sink->link->dc->caps.is_apu) {
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
+ validate_mode);
ASSERT(min_state_for_g6_temp_read >= 0);
}
- if (!dml2->config.use_native_pstate_optimization) {
- result = optimize_pstate_with_svp_and_drr(dml2, context);
- } else {
- result = dml_mode_support_wrapper(dml2, context);
- }
+ result = dml_mode_support_wrapper(dml2, context, validate_mode);
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state, causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
@@ -573,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
return result;
}
-static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
+static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
@@ -609,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
- result = call_dml_mode_support_and_programming(context);
+ result = call_dml_mode_support_and_programming(context, validate_mode);
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
* only know the correct number of active pipes after dml2_map_dc_pipes is called.
@@ -626,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
if (need_recalculation) {
/* Engage the DML again if recalculation is required. */
- call_dml_mode_support_and_programming(context);
+ call_dml_mode_support_and_programming(context, validate_mode);
if (!dml2->config.skip_hw_state_mapping) {
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
}
@@ -661,7 +484,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_copy_clocks_to_dc_state(&out_clks, context);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
- memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
+ if (context->streams[0]->sink->link->dc->caps.is_apu)
+ dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
+ else
+ memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
@@ -679,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
return result;
}
-static bool dml2_validate_only(struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2;
unsigned int result = 0;
@@ -703,12 +529,13 @@ static bool dml2_validate_only(struct dc_state *context)
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
- &dml2->v20.scratch.mode_support_info);
+ &dml2->v20.scratch.mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
- return (result == 1) ? true : false;
+ return result == 1;
}
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
@@ -718,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -728,30 +556,34 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
/* DML2.1 validation path */
if (dml2->architecture == dml2_architecture_21) {
- out = dml21_validate(in_dc, context, dml2, fast_validate);
+ out = dml21_validate(in_dc, context, dml2, validate_mode);
return out;
}
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
- out = dml2_validate_only(context);
+ DC_FP_START();
+
+ /* Use dml_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml2_validate_only(context, validate_mode);
else
- out = dml2_validate_and_build_resource(in_dc, context);
+ out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
+
+ DC_FP_END();
+
return out;
}
static inline struct dml2_context *dml2_allocate_memory(void)
{
- return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
}
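dml2_allocate_memory() switches from kzalloc() to vzalloc(): dml2_context is a large structure, and vzalloc() returns zeroed, vmalloc-backed memory that does not need physically contiguous pages, with the matching vfree() appearing in the dml2_destroy() hunk below. A short sketch of the pairing, using a hypothetical context type of comparable size:

#include <linux/vmalloc.h>

/* hypothetical stand-in, roughly as large as dml2_context */
struct big_ctx_sketch { unsigned char scratch[512 * 1024]; };

static struct big_ctx_sketch *big_ctx_create(void)
{
	return vzalloc(sizeof(struct big_ctx_sketch));	/* zeroed; NULL on failure; may sleep */
}

static void big_ctx_destroy(struct big_ctx_sketch *ctx)
{
	vfree(ctx);	/* vfree(NULL) is a no-op, like kfree(NULL) */
}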
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
- }
+ }
// Store config options
(*dml2)->config = *config;
@@ -763,6 +595,9 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
case DCN_VERSION_3_51:
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
break;
+ case DCN_VERSION_3_6:
+ (*dml2)->v20.dml_core_ctx.project = dml_project_dcn36;
+ break;
case DCN_VERSION_3_2:
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
break;
@@ -777,19 +612,22 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
+ DC_FP_START();
+
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+
+ DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
- }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
@@ -809,7 +647,7 @@ void dml2_destroy(struct dml2_context *dml2)
if (dml2->architecture == dml2_architecture_21)
dml21_destroy(dml2);
- kfree(dml2);
+ vfree(dml2);
}
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
@@ -857,9 +695,8 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
index 0f944fcfd5a5..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
+struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
+ bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {
@@ -159,6 +161,7 @@ struct dml2_clks_table_entry {
unsigned int dtbclk_mhz;
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
+ unsigned int dram_speed_mts; /* which is based on wck_ratio */
};
struct dml2_clks_num_entries {
@@ -237,7 +240,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
@@ -269,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc,
* dml2_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx.
*
* DML1.0 compatible interface for validation.
*
@@ -292,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc,
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml2,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
index 17f0972b1af7..17f0972b1af7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
index f7d30b47beff..d459f93cf40b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
@@ -31,3 +31,4 @@
*/
#include "os_types.h"
#include "cmntypes.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
index 00d22e542469..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
index bf491cf0582d..bf491cf0582d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
index 2a2f84e07ca8..7fadbe6d7af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+
#ifndef __DML_LOGGING_H__
#define __DML_LOGGING_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index e1da48b05d00..ce91e5d28956 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -194,8 +194,13 @@ void dpp_reset(struct dpp *dpp_base)
dpp->filter_h = NULL;
dpp->filter_v = NULL;
+ memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
+ memset(&dpp_base->att, 0, sizeof(dpp_base->att));
+
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+
+ dpp_base->cursor_offload = false;
}
@@ -480,10 +485,13 @@ void dpp1_set_cursor_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- REG_UPDATE(CURSOR0_CONTROL,
- CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ }
dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
@@ -493,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias;
+ dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale;
}
}
@@ -516,6 +529,15 @@ void dpp1_dppclk_control(
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Force disable cursor */
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
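The cursor hunks above gate the CUR0 register writes on the new cursor_offload flag: when it is set the MMIO write is skipped, the shadow copies in dpp_base->pos and dpp_base->att are still refreshed, and dpp_force_disable_cursor() writes the register unconditionally so the cursor can always be forced off. A sketch of that gating pattern with stand-in types; whether "offload" means firmware owns the cursor registers is an assumption here, the diff itself only shows the write being skipped:

static unsigned int mmio_cur0_enable;	/* stands in for the CURSOR0_CONTROL register */

static void cursor_write_enable(unsigned int value)	/* stands in for REG_UPDATE() */
{
	mmio_cur0_enable = value;
}

struct cursor_hw_sketch {
	int cursor_offload;		/* someone else programs the register */
	unsigned int shadow_enable;	/* cached copy of the enable bit */
};

static void cursor_set_enable(struct cursor_hw_sketch *hw, unsigned int enable)
{
	if (hw->shadow_enable != enable && !hw->cursor_offload)
		cursor_write_enable(enable);	/* only touch hardware when we own it */

	hw->shadow_enable = enable;		/* shadow state tracks the request either way */
}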
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c48139bed11f..b12f34345a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1348,7 +1348,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF; \
- uint32_t CURSOR0_FP_SCALE_BIAS;
+ uint32_t CURSOR0_FP_SCALE_BIAS; \
+ uint32_t OBUF_CONTROL;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1450,7 +1451,6 @@ void dpp1_set_degamma(
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params);
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index f09cba8e29cc..85f359b5da67 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -26,7 +26,6 @@
#define __DCN20_DPP_H__
#include "dcn10/dcn10_dpp.h"
-#include "spl/dc_spl_types.h"
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 40acebd13e46..ef4a16117181 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
}
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ dpp_reg_state->recout_start = REG_READ(RECOUT_START);
+ dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE);
+ dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_mode = REG_READ(SCL_MODE);
+ dpp_reg_state->cm_control = REG_READ(CM_CONTROL);
+ dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL);
+ dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL);
+ dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL);
+ dpp_reg_state->mpc_size = REG_READ(MPC_SIZE);
+}
+
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -425,11 +445,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@@ -467,6 +482,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
+ // Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
@@ -577,9 +598,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
@@ -789,8 +807,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_blnd_lut(dpp_base, false);
+ dpp3_power_on_blnd_lut(dpp_base, false);
return false;
}
@@ -1203,8 +1220,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_shaper(dpp_base, false);
+ dpp3_power_on_shaper(dpp_base, false);
return false;
}
@@ -1398,8 +1414,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base,
if (params == NULL) {
dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_hdr3dlut(dpp_base, false);
+ dpp3_power_on_hdr3dlut(dpp_base, false);
return false;
}
@@ -1496,6 +1511,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_force_disable_cursor = dpp_force_disable_cursor,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index f236824126e9..d4a70b4379ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha(
void dpp30_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state);
+
bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index fa67e54bf94e..8a5aa5e86850 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_read_reg_state = dpp30_read_reg_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
index 992df172378c..f33dddbfcc31 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
@@ -27,7 +27,6 @@
#include "dcn20/dcn20_dpp.h"
#include "dcn30/dcn30_dpp.h"
-#include "spl/dc_spl_types.h"
bool dpp32_construct(struct dcn3_dpp *dpp3,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 62b7012cda43..977d83bf7741 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv(
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
@@ -138,7 +139,7 @@ bool dpp35_construct(
dpp->base.funcs = &dcn35_dpp_funcs;
// w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs
- if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40)
dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 97bf26fa3573..96c2c853de42 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_regamma_pwl = NULL,
.dpp_set_pre_degam = dpp3_set_pre_degam,
.dpp_program_input_lut = NULL,
- .dpp_full_bypass = dpp401_full_bypass,
+ .dpp_full_bypass = NULL,
.dpp_setup = dpp401_dpp_setup,
.dpp_program_degamma_pwl = NULL,
.dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
@@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.set_cursor_matrix = dpp401_set_cursor_matrix,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 4bc85aaf17da..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -567,80 +567,83 @@
type ISHARP_NLDELTA_SCLIP_PIVOT_N; \
type ISHARP_NLDELTA_SCLIP_SLOPE_N
+#define DPP_REG_VARIABLE_LIST_DCN401 \
+ DPP_DCN3_REG_VARIABLE_LIST_COMMON; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_G_Y; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB; \
+ uint32_t CUR0_MATRIX_MODE; \
+ uint32_t CUR0_MATRIX_C11_C12_A; \
+ uint32_t CUR0_MATRIX_C13_C14_A; \
+ uint32_t CUR0_MATRIX_C21_C22_A; \
+ uint32_t CUR0_MATRIX_C23_C24_A; \
+ uint32_t CUR0_MATRIX_C31_C32_A; \
+ uint32_t CUR0_MATRIX_C33_C34_A; \
+ uint32_t CUR0_MATRIX_C11_C12_B; \
+ uint32_t CUR0_MATRIX_C13_C14_B; \
+ uint32_t CUR0_MATRIX_C21_C22_B; \
+ uint32_t CUR0_MATRIX_C23_C24_B; \
+ uint32_t CUR0_MATRIX_C31_C32_B; \
+ uint32_t CUR0_MATRIX_C33_C34_B; \
+ uint32_t DSCL_SC_MODE; \
+ uint32_t DSCL_EASF_H_MODE; \
+ uint32_t DSCL_EASF_H_BF_CNTL; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_MODE; \
+ uint32_t DSCL_EASF_V_BF_CNTL; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG5; \
+ uint32_t DSCL_SC_MATRIX_C0C1; \
+ uint32_t DSCL_SC_MATRIX_C2C3; \
+ uint32_t ISHARP_MODE; \
+ uint32_t ISHARP_NOISEDET_THRESHOLD; \
+ uint32_t ISHARP_NOISE_GAIN_PWL; \
+ uint32_t ISHARP_LBA_PWL_SEG0; \
+ uint32_t ISHARP_LBA_PWL_SEG1; \
+ uint32_t ISHARP_LBA_PWL_SEG2; \
+ uint32_t ISHARP_LBA_PWL_SEG3; \
+ uint32_t ISHARP_LBA_PWL_SEG4; \
+ uint32_t ISHARP_LBA_PWL_SEG5; \
+ uint32_t ISHARP_DELTA_CTRL; \
+ uint32_t ISHARP_DELTA_DATA; \
+ uint32_t ISHARP_DELTA_INDEX; \
+ uint32_t ISHARP_NLDELTA_SOFT_CLIP
+
struct dcn401_dpp_registers {
- DPP_DCN3_REG_VARIABLE_LIST_COMMON;
- uint32_t CURSOR0_FP_SCALE_BIAS_G_Y;
- uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB;
- uint32_t CUR0_MATRIX_MODE;
- uint32_t CUR0_MATRIX_C11_C12_A;
- uint32_t CUR0_MATRIX_C13_C14_A;
- uint32_t CUR0_MATRIX_C21_C22_A;
- uint32_t CUR0_MATRIX_C23_C24_A;
- uint32_t CUR0_MATRIX_C31_C32_A;
- uint32_t CUR0_MATRIX_C33_C34_A;
- uint32_t CUR0_MATRIX_C11_C12_B;
- uint32_t CUR0_MATRIX_C13_C14_B;
- uint32_t CUR0_MATRIX_C21_C22_B;
- uint32_t CUR0_MATRIX_C23_C24_B;
- uint32_t CUR0_MATRIX_C31_C32_B;
- uint32_t CUR0_MATRIX_C33_C34_B;
- uint32_t DSCL_SC_MODE;
- uint32_t DSCL_EASF_H_MODE;
- uint32_t DSCL_EASF_H_BF_CNTL;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG5;
- uint32_t DSCL_EASF_V_MODE;
- uint32_t DSCL_EASF_V_BF_CNTL;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG5;
- uint32_t DSCL_SC_MATRIX_C0C1;
- uint32_t DSCL_SC_MATRIX_C2C3;
- uint32_t ISHARP_MODE;
- uint32_t ISHARP_NOISEDET_THRESHOLD;
- uint32_t ISHARP_NOISE_GAIN_PWL;
- uint32_t ISHARP_LBA_PWL_SEG0;
- uint32_t ISHARP_LBA_PWL_SEG1;
- uint32_t ISHARP_LBA_PWL_SEG2;
- uint32_t ISHARP_LBA_PWL_SEG3;
- uint32_t ISHARP_LBA_PWL_SEG4;
- uint32_t ISHARP_LBA_PWL_SEG5;
- uint32_t ISHARP_DELTA_CTRL;
- uint32_t ISHARP_DELTA_DATA;
- uint32_t ISHARP_DELTA_INDEX;
- uint32_t ISHARP_NLDELTA_SOFT_CLIP;
+ DPP_REG_VARIABLE_LIST_DCN401;
};
struct dcn401_dpp_shift {
@@ -670,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
@@ -681,8 +694,6 @@ void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
-void dpp401_full_bypass(struct dpp *dpp_base);
-
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 3b6ca7974e18..62bf7cea21d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -88,30 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-void dpp401_full_bypass(struct dpp *dpp_base)
-{
- struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
-
- /* Input pixel format: ARGB8888 */
- REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
- CNVC_SURFACE_PIXEL_FORMAT, 0x8);
-
- /* Zero expansion */
- REG_SET_3(FORMAT_CONTROL, 0,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 0,
- FORMAT_EXPANSION_MODE, 0);
-
- /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
- if (dpp->tf_mask->CM_BYPASS_EN)
- REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
- else
- REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-
- /* Setting degamma bypass for now */
- REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
-}
-
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
@@ -120,23 +96,28 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
- // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- cur_rom_en = 1;
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -154,9 +135,13 @@ void dpp401_set_cursor_position(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t cur_en = pos->enable ? 1 : 0;
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ }
dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp401_set_optional_cursor_attributes(
@@ -166,10 +151,17 @@ void dpp401_set_optional_cursor_attributes(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias;
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d9aaebfa3a0a..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,12 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#include "clk_mgr.h"
+#include "resource.h"
+
+#define DC_LOGGER \
+ dsc->ctx->logger
+
/* This module's internal functions */
/* default DSC policy target bitrate limit is 16bpp */
@@ -146,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing);
+
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
@@ -180,6 +191,7 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slice_count,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -439,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
return true;
}
-
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
@@ -455,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
+ unsigned int min_dsc_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -466,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, &config);
+ &options, link_encoding, min_dsc_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -480,20 +494,195 @@ bool dc_dsc_compute_bandwidth_range(
return is_dsc_possible;
}
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing)
+{
+ struct dsc_enc_caps dsc_enc_caps;
+
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ DC_LOG_DSC("dsc encoder caps:");
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_enc_caps.dsc_version);
+ DC_LOG_DSC("\tslice_caps 0x%x", dsc_enc_caps.slice_caps.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_enc_caps.lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_enc_caps.is_block_pred_supported);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_enc_caps.color_formats.raw);
+ DC_LOG_DSC("\tcolor_depth 0x%x", dsc_enc_caps.color_depth.raw);
+ DC_LOG_DSC("\tmax_total_throughput_mps %d", dsc_enc_caps.max_total_throughput_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_enc_caps.max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_enc_caps.bpp_increment_div);
+}
+
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps)
+{
+ DC_LOG_DSC("dsc decoder caps:");
+ DC_LOG_DSC("\tis_dsc_supported %d", dsc_sink_caps->is_dsc_supported);
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_sink_caps->dsc_version);
+ DC_LOG_DSC("\trc_buffer_size %d", dsc_sink_caps->rc_buffer_size);
+ DC_LOG_DSC("\tslice_caps1 0x%x", dsc_sink_caps->slice_caps1.raw);
+ DC_LOG_DSC("\tslice_caps2 0x%x", dsc_sink_caps->slice_caps2.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_sink_caps->lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_sink_caps->is_block_pred_supported);
+ DC_LOG_DSC("\tedp_max_bits_per_pixel %d", dsc_sink_caps->edp_max_bits_per_pixel);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_sink_caps->color_formats.raw);
+ DC_LOG_DSC("\tthroughput_mode_0_mps %d", dsc_sink_caps->throughput_mode_0_mps);
+ DC_LOG_DSC("\tthroughput_mode_1_mps %d", dsc_sink_caps->throughput_mode_1_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_sink_caps->max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_sink_caps->bpp_increment_div);
+ DC_LOG_DSC("\tbranch_overall_throughput_0_mps %d", dsc_sink_caps->branch_overall_throughput_0_mps);
+ DC_LOG_DSC("\tbranch_overall_throughput_1_mps %d", dsc_sink_caps->branch_overall_throughput_1_mps);
+ DC_LOG_DSC("\tbranch_max_line_width %d", dsc_sink_caps->branch_max_line_width);
+ DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
+}
+
+
+static void build_dsc_enc_combined_slice_caps(
+ const struct dsc_enc_caps *single_dsc_enc_caps,
+ struct dsc_enc_caps *dsc_enc_caps,
+ unsigned int max_odm_combine_factor)
+{
+ /* 1-16 slice configurations, single DSC */
+ dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
+
+ /* 2x DSC's */
+ if (max_odm_combine_factor >= 2) {
+ /* 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+
+ /* 8 + 8 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+ }
+
+ /* 3x DSC's */
+ if (max_odm_combine_factor >= 3) {
+ /* 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+
+ /* 4x DSC's */
+ if (max_odm_combine_factor >= 4) {
+ /* 1 + 1 + 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 + 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 3 + 3 + 3 + 3 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
+
+ /* 4 + 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+}
+
+static void build_dsc_enc_caps(
+ const struct display_stream_compressor *dsc,
+ struct dsc_enc_caps *dsc_enc_caps)
+{
+ unsigned int max_dscclk_khz;
+ unsigned int num_dsc;
+ unsigned int max_odm_combine_factor;
+ struct dsc_enc_caps single_dsc_enc_caps;
+
+ struct dc *dc;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
+ return;
+
+ dc = dsc->ctx->dc;
+
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc)
+ return;
+
+ /* get max DSCCLK from clk_mgr */
+ max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
+
+ dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
+
+ /* global capabilities */
+ dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
+ dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
+ dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
+ dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
+ dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
+ dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
+ dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
+
+ /* expand per DSC capabilities to global */
+ max_odm_combine_factor = dc->caps.max_odm_combine_factor;
+ num_dsc = dc->res_pool->res_cap->num_dsc;
+ max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
+ dsc_enc_caps->max_total_throughput_mps =
+ single_dsc_enc_caps.max_total_throughput_mps *
+ max_odm_combine_factor;
+
+ /* check slice counts possible with ODM combine */
+ build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
+}
+
+static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
+{
+ return (value + 9) / 10;
+}
+
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing)
+{
+ unsigned int max_dispclk_khz;
+
+ /* derive the max pixel rate: start from encoder throughput caps, use max dispclk when available */
+ max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
+ if (dsc && dsc->ctx->dc) {
+ if (dsc->ctx->dc->clk_mgr &&
+ dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
+ /* dispclk is available */
+ max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
+ }
+ }
+
+ /* validate parameters */
+ if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0)
+ return 1;
+
+ /* consider minimum odm slices required due to
+ * 1) display pipe throughput (dispclk)
+ * 2) max image width per slice
+ */
+ return dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
+ max_dispclk_khz), // throughput
+ dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
+ dsc_enc_caps->max_slice_width))); // slice width
+}
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
- // This is a static HW query, so we can use any DSC
-
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc) {
- if (!dsc->ctx->dc->debug.disable_dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc)
+ return;
+
+ /* check if the reported caps are global or only for a single DCN DSC enc */
+ if (dsc->funcs->dsc_get_enc_caps) {
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ } else {
+ build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -576,11 +765,6 @@ static bool intersect_dsc_caps(
return true;
}
-static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
-{
- return (value + 9) / 10;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -865,11 +1049,11 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slices_h,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h = 0;
- int min_slices_h = 0;
int num_slices_h = 0;
int pic_width;
int slice_width;
@@ -973,12 +1157,14 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- min_slices_h = pic_width / dsc_common_caps.max_slice_width;
- if (pic_width % dsc_common_caps.max_slice_width)
- min_slices_h++;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
+ /* increase minimum slice count to meet sink throughput limitations */
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
@@ -987,14 +1173,12 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
- if (pic_width % min_slices_h != 0)
- min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
-
- if (min_slices_h == 0 && max_slices_h == 0)
- is_dsc_possible = false;
+ /* increase minimum slice count to meet divisibility requirements */
+ while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
+ min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ }
+ is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
if (!is_dsc_possible)
goto done;
@@ -1117,12 +1301,19 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
-
+ unsigned int min_dsc_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, link_encoding, dsc_cfg);
+ timing,
+ options,
+ link_encoding,
+ min_dsc_slice_count,
+ dsc_cfg);
return is_dsc_possible;
}
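Note on the new minimum-slice derivation above: get_min_dsc_slice_count_for_odm() is effectively max(ceil(pixel rate / max DISPCLK), ceil(picture width / max slice width)). The following standalone sketch shows that arithmetic with plain integer math in place of the dc_fixpt_* helpers the patch uses; the clock and timing values are made up for illustration only.

#include <stdio.h>

static unsigned int ceil_div(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	/* hypothetical values, not taken from any real timing or ASIC */
	unsigned int pix_clk_khz     = 1188000; /* ~1.188 GHz pixel rate */
	unsigned int max_dispclk_khz = 600000;  /* max DISPCLK reported by clk_mgr */
	unsigned int pic_width       = 7680;
	unsigned int max_slice_width = 5184;

	unsigned int by_throughput  = ceil_div(pix_clk_khz, max_dispclk_khz); /* -> 2 */
	unsigned int by_slice_width = ceil_div(pic_width, max_slice_width);   /* -> 2 */
	unsigned int min_slices = by_throughput > by_slice_width ? by_throughput : by_slice_width;

	printf("min DSC slice count for ODM: %u\n", min_slices);
	return 0;
}

setup_dsc_config() then raises this value further so it is supported by both source and sink slice caps and divides the picture width evenly.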
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 75128fd34306..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -57,13 +58,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
-
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
@@ -162,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL);
+ dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS);
+}
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
@@ -413,9 +414,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ // Need to find the ceiling value for the slice width
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
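The slice width change in this hunk switches from floor to ceiling division and folds in the new dsc_padding field, so padded or non-divisible picture widths are still fully covered by the programmed slices. A minimal sketch of that rounding, shown in isolation (the example values in the comment are illustrative only):

/* same rounding as the patched dsc_prepare_config(), shown standalone */
static unsigned int dsc_slice_width(unsigned int pic_width,
				    unsigned int dsc_padding,
				    unsigned int num_slices_h)
{
	return (pic_width + dsc_padding + num_slices_h - 1) / num_slices_h;
}
/* e.g. pic_width = 3001, dsc_padding = 0, num_slices_h = 4 -> 751,
 * where the previous floor division would have produced 750 and left
 * one pixel column outside any slice.
 */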
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index 1fb90b52b814..2337c3a97235 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -457,6 +457,12 @@
type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCRM_DSC_FORWARD_EN_STATUS
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
@@ -600,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
uint8_t *dsc_packed_pps);
void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 6f4f5a3c4861..e712985f7abd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -28,10 +28,11 @@
#include "reg_helper.h"
static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn35_dsc_funcs = {
- .dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = {
.dsc_disable = dsc2_disable,
.dsc_disconnect = dsc2_disconnect,
.dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc35_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
{
REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable);
}
+
+void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
+{
+ dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
+
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+ dsc_enc_caps->lb_bit_depth = 13;
+ dsc_enc_caps->is_block_pred_supported = true;
+
+ dsc_enc_caps->color_formats.bits.RGB = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
+
+ dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
+}
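The per-encoder caps for DCN3.5 (above) and DCN4.01 (below) now derive max throughput from the max DSCCLK supplied by clk_mgr instead of a hard-coded constant. The "* 3 / 1000" factor converts kHz to megapixels per second, on the assumption that one DSC instance processes up to three pixels per DSCCLK cycle; a small sketch with an example value:

/* illustration only; assumes 3 pixels processed per DSCCLK cycle */
static unsigned int dsc_max_throughput_mps(unsigned int max_dscclk_khz)
{
	return max_dscclk_khz * 3 / 1000; /* kHz * pixels/clock -> Mpix/s */
}
/* e.g. max_dscclk_khz = 600000 -> 1800 Mpix/s per DSC instance;
 * build_dsc_enc_caps() then scales this by the ODM combine factor to get
 * the global max_total_throughput_mps.
 */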
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 61678b0a5a1e..c1bdbb38c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -9,26 +9,14 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
-#define MAX_DSC_UNIT_COMBINE 4
-
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
-static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
-static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
-static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
-static void dsc401_disable(struct display_stream_compressor *dsc);
-static void dsc401_disconnect(struct display_stream_compressor *dsc);
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn401_dsc_funcs = {
- .dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
.dsc_set_config = dsc401_set_config,
@@ -37,6 +25,8 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disable = dsc401_disable,
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
+ .dsc_read_reg_state = dsc2_read_reg_state
};
/* Macro definitions for REG_SET macros */
@@ -52,12 +42,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
/* API functions (external or via structure->function_pointer) */
@@ -79,22 +63,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
dsc->max_image_width = 5184;
}
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
{
- int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
-
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
- /* 1 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
- /* 2 slice is only supported with 1 or 2 DSC units */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
- /* 3 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
@@ -108,7 +84,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
- dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
@@ -117,7 +93,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
-static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
+void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
@@ -134,7 +110,7 @@ static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_
}
-static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
+bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
@@ -145,7 +121,7 @@ static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const
return dsc_prepare_config(dsc_cfg, &dsc401->reg_vals, &dsc_optc_cfg);
}
-static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
@@ -160,7 +136,7 @@ static void dsc401_set_config(struct display_stream_compressor *dsc, const struc
dsc_write_to_registers(dsc, &dsc401->reg_vals);
}
-static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
+void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
@@ -185,7 +161,7 @@ static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
}
-static void dsc401_disable(struct display_stream_compressor *dsc)
+void dsc401_disable(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
@@ -204,14 +180,14 @@ static void dsc401_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
REG_WAIT(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN_STATUS, 0, 2, 50000);
}
-static void dsc401_disconnect(struct display_stream_compressor *dsc)
+void dsc401_disconnect(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index 3c9fa8988974..7acd57eb4f42 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -334,5 +334,13 @@ void dsc401_construct(struct dcn401_dsc *dsc,
void dsc401_set_fgcg(struct dcn401_dsc *dsc401, bool enable);
+void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
+void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ struct dsc_optc_config *dsc_optc_cfg);
+void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
+void dsc401_disable(struct display_stream_compressor *dsc);
+void dsc401_disconnect(struct display_stream_compressor *dsc);
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 1ebce5426a58..81c83d5fe042 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -41,6 +41,7 @@ struct dsc_config {
enum dc_color_depth color_depth; /* Bits per component */
bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
+ uint32_t dsc_padding;
};
@@ -65,6 +66,10 @@ struct dcn_dsc_state {
uint32_t dsc_opp_source;
};
+struct dcn_dsc_reg_state {
+ uint32_t dsc_top_control;
+ uint32_t dscc_interrupt_control_status;
+};
/* DSC encoder capabilities
* They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
@@ -99,6 +104,7 @@ struct dsc_enc_caps {
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+ void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
@@ -108,6 +114,7 @@ struct dsc_funcs {
void (*dsc_disable)(struct display_stream_compressor *dsc);
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
+ void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index b099989d9364..942d9f0b6df2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source(
}
}
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 9a0952f9004f..8bc67ca42197 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -112,6 +112,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
dal_hw_factory_dcn32_init(factory);
return true;
case DCN_VERSION_4_01:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 3f13a744d07d..01ec451004f7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -62,7 +62,7 @@ static void dal_hw_hpd_destroy(
*ptr = NULL;
}
-static enum gpio_result get_value(
+static enum gpio_result dal_hw_hpd_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
@@ -85,7 +85,7 @@ static enum gpio_result get_value(
return dal_hw_gpio_get_value(ptr, value);
}
-static enum gpio_result set_config(
+static enum gpio_result dal_hw_hpd_set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
@@ -104,9 +104,9 @@ static enum gpio_result set_config(
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
- .get_value = get_value,
+ .get_value = dal_hw_hpd_get_value,
.set_value = dal_hw_gpio_set_value,
- .set_config = set_config,
+ .set_config = dal_hw_hpd_set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 9832247ee739..cb79a2832287 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -113,6 +113,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
dal_hw_translate_dcn32_init(translate);
return true;
case DCN_VERSION_4_01:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 1313a7c5d87b..73a1e6a03719 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,7 +28,7 @@
#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "link.h"
+#include "link_service.h"
#include "link_hwss.h"
#include "link/protocols/link_dpcd.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
index 03b4ac2f1991..0d2ae21abbdd 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -262,7 +262,7 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
}
}
-static void fill_stream_allocation_row_info(
+void dcn31_fill_stream_allocation_row_info(
const struct link_mst_stream_allocation *stream_allocation,
uint32_t *src,
uint32_t *slots)
@@ -296,7 +296,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
/* we should clean-up table each time */
if (table->stream_count >= 1) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[0],
&src,
&slots);
@@ -310,7 +310,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 2) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[1],
&src,
&slots);
@@ -324,7 +324,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 3) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[2],
&src,
&slots);
@@ -338,7 +338,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 4) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[3],
&src,
&slots);
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
index 51f5781325e8..40859660e4dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -226,4 +226,10 @@ void dcn31_hpo_dp_link_enc_set_ffe(
const struct dc_link_settings *link_settings,
uint8_t ffe_preset);
+
+void dcn31_fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots);
+
#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 678db949cfe3..759b453385c4 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -323,7 +323,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -336,6 +336,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
index 8af01f579690..de3ec4fcade2 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
@@ -41,7 +41,7 @@
#define CTX \
enc3->base.ctx
-static bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
struct hpo_dp_link_encoder *enc)
{
struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
index 176b1537d2a1..bea4e1a8ff90 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -54,6 +54,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh)
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(struct hpo_dp_link_encoder *enc);
void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
struct dc_context *ctx,
uint32_t inst,
@@ -61,4 +62,7 @@ void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
+ struct hpo_dp_link_encoder *enc);
+
#endif // __DAL_DCN32_HPO_DP_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
index d738a36f2132..7847c1c4927b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
@@ -679,24 +679,6 @@ void hubbub1_update_dchub(
dh_data->dchub_info_valid = false;
}
-void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
-{
- struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
- uint32_t watermark_change_req;
-
- REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
-
- if (watermark_change_req)
- watermark_change_req = 0;
- else
- watermark_change_req = 1;
-
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
-}
-
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
index 9fbd45c7dfef..fa5c4c18ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
@@ -489,9 +489,6 @@ void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow);
bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubub);
-void hubbub1_toggle_watermark_change_req(
- struct hubbub *hubbub);
-
void hubbub1_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
index 2546224b326a..e4496ad203b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
@@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
-
+ if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) {
+ dcn21_dchvm_init(hubbub);
+ }
return hubbub1->num_vmid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index fe741100c0f8..181a93dc46e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
@@ -439,6 +440,17 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL);
+ hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL);
+ hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL);
+ hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL);
+ hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL);
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -456,6 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..9e14de3ccaee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 7fb5523f9722..5a03758e3de6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
@@ -932,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
+ if (hubbub->funcs->dchvm_init)
+ hubbub->funcs->dchvm_init(hubbub);
return NUM_VMID;
}
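
The hunk above replaces the hard-wired dcn21_dchvm_init() call with an optional per-ASIC hook that is only invoked when the funcs table provides one. A minimal standalone sketch of that optional-hook pattern follows; the structure and function names here are illustrative stand-ins, not the DC types from the patch.

#include <stdio.h>

struct hubbub;

struct hubbub_hooks {
	void (*dchvm_init)(struct hubbub *hubbub);	/* may be NULL on ASICs without DCHVM */
};

struct hubbub {
	const struct hubbub_hooks *funcs;
};

static void dcn21_style_dchvm_init(struct hubbub *hubbub)
{
	(void)hubbub;
	printf("rIOMMU/DCHVM init for this ASIC\n");
}

static void init_dchub_sys_ctx(struct hubbub *hubbub)
{
	/* Only ASICs that register the hook run DCHVM init. */
	if (hubbub->funcs->dchvm_init)
		hubbub->funcs->dchvm_init(hubbub);
}

int main(void)
{
	static const struct hubbub_hooks with_hvm = { .dchvm_init = dcn21_style_dchvm_init };
	static const struct hubbub_hooks without_hvm = { 0 };
	struct hubbub a = { .funcs = &with_hvm };
	struct hubbub b = { .funcs = &without_hvm };

	init_dchub_sys_ctx(&a);	/* runs the hook */
	init_dchub_sys_ctx(&b);	/* safely skipped */
	return 0;
}
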
@@ -1070,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn21_dchvm_init
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 5264dc26cce1..237331b35378 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -28,6 +28,7 @@
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
+#include "dal_asic_id.h"
#define CTX \
@@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, dc_control);
+}
+
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ struct dc *dc = hubbub->ctx->dc;
bool wm_pending = false;
+ if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ /* before raising watermarks, SDP control give to DF, stutter must be disabled */
+ wm_pending = true;
+ hubbub32_set_sdp_control(hubbub, false);
+ hubbub1_allow_self_refresh_control(hubbub, false);
+ }
+
if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
@@ -786,9 +805,20 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower) {
+ /* after lowering watermarks, stutter setting is restored, SDP control given to DC */
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+
+ if (dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ hubbub32_set_sdp_control(hubbub, true);
+ }
+ } else if (dc->debug.disable_stutter) {
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ }
- hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);
return wm_pending;
}
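
The watermark hunk above orders the workaround asymmetrically: when watermarks are being raised (safe_to_lower is false) SDP control is handed to DF and stutter is disabled first; only after a lowering pass are the stutter policy and DC-side SDP control restored. A condensed standalone sketch of that ordering is shown below, with hypothetical helpers standing in for the REG_UPDATE-based calls in the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the register-programming helpers. */
static void set_sdp_control(bool dc_control) { printf("SDP control -> %s\n", dc_control ? "DC" : "DF"); }
static void allow_self_refresh(bool allow)   { printf("stutter %s\n", allow ? "allowed" : "disabled"); }
static void write_watermarks(void)           { printf("watermarks programmed\n"); }

static void program_watermarks(bool safe_to_lower, bool wa_enabled, bool disable_stutter)
{
	if (!safe_to_lower && wa_enabled) {
		/* Raising watermarks: hand SDP to DF and disable stutter first. */
		set_sdp_control(false);
		allow_self_refresh(false);
	}

	write_watermarks();

	if (safe_to_lower) {
		/* Lowering watermarks: restore stutter policy, then give SDP back to DC. */
		allow_self_refresh(!disable_stutter);
		if (wa_enabled)
			set_sdp_control(true);
	} else if (disable_stutter) {
		allow_self_refresh(false);
	}
}

int main(void)
{
	program_watermarks(false, true, false);	/* raise path */
	program_watermarks(true, true, false);	/* lower path */
	return 0;
}
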
@@ -973,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub)
ignore the "df_pre_cstate_req" from the SDP port control.
only the DCN will determine when to connect the SDP port
*/
- REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
- SDPIF_PORT_CONTROL, 1);
+ hubbub32_set_sdp_control(hubbub, true);
/*Set SDP's max outstanding request to 512
must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
@@ -1008,6 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 5eb3da8d5206..43ba399f4822 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -46,7 +46,7 @@
#define DCN35_CRB_SEGMENT_SIZE_KB 64
-static void dcn35_init_crb(struct hubbub *hubbub)
+void dcn35_init_crb(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -71,7 +71,7 @@ static void dcn35_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x5FF);
}
-static void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
+void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
unsigned int compbuf_size_segments = (compbuf_size_kb + DCN35_CRB_SEGMENT_SIZE_KB - 1) / DCN35_CRB_SEGMENT_SIZE_KB;
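
The segment count computed on the last line above is a plain ceiling division by the 64 KB CRB segment size, so any partial segment rounds up to a whole one. A quick standalone check of that arithmetic (the helper name is illustrative):

#include <assert.h>

#define SEGMENT_SIZE_KB 64	/* mirrors DCN35_CRB_SEGMENT_SIZE_KB */

static unsigned int kb_to_segments(unsigned int size_kb)
{
	return (size_kb + SEGMENT_SIZE_KB - 1) / SEGMENT_SIZE_KB;
}

int main(void)
{
	assert(kb_to_segments(0) == 0);
	assert(kb_to_segments(64) == 1);
	assert(kb_to_segments(65) == 2);	/* partial segment rounds up */
	assert(kb_to_segments(128) == 2);
	return 0;
}
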
@@ -255,7 +255,7 @@ static bool hubbub35_program_stutter_z8_watermarks(
return wm_pending;
}
-static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
+void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz)
{
@@ -295,7 +295,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
}
-static bool hubbub35_program_watermarks(
+bool hubbub35_program_watermarks(
struct hubbub *hubbub,
union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
@@ -334,7 +335,7 @@ static bool hubbub35_program_watermarks(
}
/* Copy values from WM set A to all other sets */
-static void hubbub35_init_watermarks(struct hubbub *hubbub)
+void hubbub35_init_watermarks(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t reg;
@@ -396,7 +397,7 @@ static void hubbub35_init_watermarks(struct hubbub *hubbub)
}
-static void hubbub35_wm_read_state(struct hubbub *hubbub,
+void hubbub35_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -513,7 +514,7 @@ static void hubbub35_set_fgcg(struct dcn20_hubbub *hubbub2, bool enable)
REG_UPDATE(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, !enable);
}
-static void hubbub35_init(struct hubbub *hubbub)
+void hubbub35_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
/*Enable clock gaters*/
@@ -548,6 +549,55 @@ static void hubbub35_init(struct hubbub *hubbub)
memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
+void dcn35_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ // Disable gating and memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1);
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 1,
+ HVM_DISPCLK_G_GATE_DIS, 1,
+ HVM_DCFCLK_R_GATE_DIS, 1,
+ HVM_DCFCLK_G_GATE_DIS, 1);
+
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+
+ //Enable memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+ hubbub->riommu_active = true;
+ }
+}
+
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
int memory_channel_count,
int words_per_channel)
@@ -588,6 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn35_dchvm_init
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 54cf00ffceb8..9f65fff1bd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -152,4 +152,21 @@ void hubbub35_construct(struct dcn20_hubbub *hubbub2,
int det_size_kb,
int pixel_chunk_size_kb,
int config_return_buffer_size_kb);
+
+void hubbub35_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+bool hubbub35_program_watermarks(
+ struct hubbub *hubbub,
+ union dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub35_init_watermarks(struct hubbub *hubbub);
+void dcn35_program_compbuf_size(struct hubbub *hubbub,
+ unsigned int compbuf_size_kb, bool safe_to_increase);
+void dcn35_init_crb(struct hubbub *hubbub);
+void hubbub35_init(struct hubbub *hubbub);
+void dcn35_dchvm_init(struct hubbub *hubbub);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index f0ba944553df..6378e3fd7249 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -140,7 +140,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -546,10 +546,17 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
+void hubp_reset(struct hubp *hubp)
+{
+ memset(&hubp->pos, 0, sizeof(hubp->pos));
+ memset(&hubp->att, 0, sizeof(hubp->att));
+ hubp->cursor_offload = false;
+}
+
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1351,8 +1358,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
void hubp1_init(struct hubp *hubp)
{
- //do nothing
+ hubp_reset(hubp);
}
+
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1365,6 +1373,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index 631350cd4f2e..f2571076fc50 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,10 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +252,20 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \
+ uint32_t HUBPRET_INTERRUPT; \
+ uint32_t HUBPRET_MEM_PWR_CTRL; \
+ uint32_t HUBPRET_MEM_PWR_STATUS; \
+ uint32_t HUBPRET_READ_LINE_CTRL0; \
+ uint32_t HUBPRET_READ_LINE_CTRL1; \
+ uint32_t HUBPRET_READ_LINE0; \
+ uint32_t HUBPRET_READ_LINE1; \
+ uint32_t HUBPREQ_MEM_PWR_CTRL; \
+ uint32_t HUBPREQ_MEM_PWR_STATUS
+
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +638,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -666,10 +684,147 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_fl_regs_st {
+ uint32_t lut_enable;
+ uint32_t lut_done;
+ uint32_t lut_addr_mode;
+ uint32_t lut_width;
+ uint32_t lut_mpc_width;
+ uint32_t lut_tmz;
+ uint32_t lut_crossbar_sel_r;
+ uint32_t lut_crossbar_sel_g;
+ uint32_t lut_crossbar_sel_b;
+ uint32_t lut_addr_hi;
+ uint32_t lut_addr_lo;
+ uint32_t refcyc_3dlut_group;
+ uint32_t lut_fl_bias;
+ uint32_t lut_fl_scale;
+ uint32_t lut_fl_mode;
+ uint32_t lut_fl_format;
+};
+struct dcn_hubp_reg_state {
+ uint32_t hubp_cntl;
+ uint32_t mall_config;
+ uint32_t mall_sub_vp;
+ uint32_t hubp_req_size_config;
+ uint32_t hubp_req_size_config_c;
+ uint32_t vmpg_config;
+ uint32_t addr_config;
+ uint32_t pri_viewport_dimension;
+ uint32_t pri_viewport_dimension_c;
+ uint32_t pri_viewport_start;
+ uint32_t pri_viewport_start_c;
+ uint32_t sec_viewport_dimension;
+ uint32_t sec_viewport_dimension_c;
+ uint32_t sec_viewport_start;
+ uint32_t sec_viewport_start_c;
+ uint32_t surface_config;
+ uint32_t tiling_config;
+ uint32_t clk_cntl;
+ uint32_t mall_status;
+ uint32_t measure_win_ctrl_dcfclk;
+ uint32_t measure_win_ctrl_dppclk;
+
+ uint32_t blank_offset_0;
+ uint32_t blank_offset_1;
+ uint32_t cursor_settings;
+ uint32_t dcn_cur0_ttu_cntl0;
+ uint32_t dcn_cur0_ttu_cntl1;
+ uint32_t dcn_cur1_ttu_cntl0;
+ uint32_t dcn_cur1_ttu_cntl1;
+ uint32_t dcn_dmdat_vm_cntl;
+ uint32_t dcn_expansion_mode;
+ uint32_t dcn_global_ttu_cntl;
+ uint32_t dcn_surf0_ttu_cntl0;
+ uint32_t dcn_surf0_ttu_cntl1;
+ uint32_t dcn_surf1_ttu_cntl0;
+ uint32_t dcn_surf1_ttu_cntl1;
+ uint32_t dcn_ttu_qos_wm;
+ uint32_t dcn_vm_mx_l1_tlb_cntl;
+ uint32_t dcn_vm_system_aperture_high_addr;
+ uint32_t dcn_vm_system_aperture_low_addr;
+ uint32_t dcsurf_flip_control;
+ uint32_t dcsurf_flip_control2;
+ uint32_t dcsurf_primary_meta_surface_address;
+ uint32_t dcsurf_primary_meta_surface_address_c;
+ uint32_t dcsurf_primary_meta_surface_address_high;
+ uint32_t dcsurf_primary_meta_surface_address_high_c;
+ uint32_t dcsurf_primary_surface_address;
+ uint32_t dcsurf_primary_surface_address_c;
+ uint32_t dcsurf_primary_surface_address_high;
+ uint32_t dcsurf_primary_surface_address_high_c;
+ uint32_t dcsurf_secondary_meta_surface_address;
+ uint32_t dcsurf_secondary_meta_surface_address_c;
+ uint32_t dcsurf_secondary_meta_surface_address_high;
+ uint32_t dcsurf_secondary_meta_surface_address_high_c;
+ uint32_t dcsurf_secondary_surface_address;
+ uint32_t dcsurf_secondary_surface_address_c;
+ uint32_t dcsurf_secondary_surface_address_high;
+ uint32_t dcsurf_secondary_surface_address_high_c;
+ uint32_t dcsurf_surface_control;
+ uint32_t dcsurf_surface_earliest_inuse;
+ uint32_t dcsurf_surface_earliest_inuse_c;
+ uint32_t dcsurf_surface_earliest_inuse_high;
+ uint32_t dcsurf_surface_earliest_inuse_high_c;
+ uint32_t dcsurf_surface_flip_interrupt;
+ uint32_t dcsurf_surface_inuse;
+ uint32_t dcsurf_surface_inuse_c;
+ uint32_t dcsurf_surface_inuse_high;
+ uint32_t dcsurf_surface_inuse_high_c;
+ uint32_t dcsurf_surface_pitch;
+ uint32_t dcsurf_surface_pitch_c;
+ uint32_t dst_after_scaler;
+ uint32_t dst_dimensions;
+ uint32_t dst_y_delta_drq_limit;
+ uint32_t flip_parameters_0;
+ uint32_t flip_parameters_1;
+ uint32_t flip_parameters_2;
+ uint32_t flip_parameters_3;
+ uint32_t flip_parameters_4;
+ uint32_t flip_parameters_5;
+ uint32_t flip_parameters_6;
+ uint32_t hubpreq_mem_pwr_ctrl;
+ uint32_t hubpreq_mem_pwr_status;
+ uint32_t nom_parameters_0;
+ uint32_t nom_parameters_1;
+ uint32_t nom_parameters_2;
+ uint32_t nom_parameters_3;
+ uint32_t nom_parameters_4;
+ uint32_t nom_parameters_5;
+ uint32_t nom_parameters_6;
+ uint32_t nom_parameters_7;
+ uint32_t per_line_delivery;
+ uint32_t per_line_delivery_pre;
+ uint32_t prefetch_settings;
+ uint32_t prefetch_settings_c;
+ uint32_t ref_freq_to_pix_freq;
+ uint32_t uclk_pstate_force;
+ uint32_t vblank_parameters_0;
+ uint32_t vblank_parameters_1;
+ uint32_t vblank_parameters_2;
+ uint32_t vblank_parameters_3;
+ uint32_t vblank_parameters_4;
+ uint32_t vblank_parameters_5;
+ uint32_t vblank_parameters_6;
+ uint32_t vmid_settings_0;
+
+ uint32_t hubpret_control;
+ uint32_t hubpret_interrupt;
+ uint32_t hubpret_mem_pwr_ctrl;
+ uint32_t hubpret_mem_pwr_status;
+ uint32_t hubpret_read_line_ctrl0;
+ uint32_t hubpret_read_line_ctrl1;
+ uint32_t hubpret_read_line_status;
+ uint32_t hubpret_read_line_value;
+ uint32_t hubpret_read_line0;
+ uint32_t hubpret_read_line1;
+};
+
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
struct _vcs_dpi_display_ttu_regs_st ttu_attr;
struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct dcn_fl_regs_st fl_regs;
uint32_t pixel_format;
uint32_t inuse_addr_hi;
uint32_t inuse_addr_lo;
@@ -694,7 +849,6 @@ struct dcn_hubp_state {
uint32_t hubp_cntl;
uint32_t flip_control;
};
-
struct dcn10_hubp {
struct hubp base;
struct dcn_hubp_state state;
@@ -706,7 +860,7 @@ struct dcn10_hubp {
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -739,13 +893,15 @@ void hubp1_program_rotation(
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp1_dcc_control(struct hubp *hubp,
bool enable,
enum hubp_ind_block_size independent_64b_blks);
+void hubp_reset(struct hubp *hubp);
+
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 200194544bf0..92288de4cc10 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -310,7 +310,7 @@ void hubp2_setup_interdependent(
*/
static void hubp2_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
@@ -550,7 +550,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes(
hubp->curs_attr = *attr;
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ }
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
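
In the cursor hunks above the direct register writes become conditional on hubp->cursor_offload, while the shadow state in hubp->att is still updated unconditionally. A minimal sketch of that gate-and-cache split is below; the field and helper names are illustrative only, not the DC structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cursor_attrs {
	uint32_t addr_hi, addr_lo, width, height;
};

struct hubp_state {
	bool cursor_offload;		/* true: another agent programs the registers */
	struct cursor_attrs shadow;	/* always kept current */
};

static void mmio_program_cursor(const struct cursor_attrs *a)
{
	printf("MMIO: cursor %ux%u @ %08x%08x\n", a->width, a->height, a->addr_hi, a->addr_lo);
}

static void set_cursor_attributes(struct hubp_state *hubp, const struct cursor_attrs *a)
{
	if (!hubp->cursor_offload)
		mmio_program_cursor(a);	/* only touch hardware when not offloaded */

	hubp->shadow = *a;		/* shadow copy is updated either way */
}

int main(void)
{
	struct hubp_state hubp = { .cursor_offload = true };
	struct cursor_attrs a = { .addr_hi = 0x1, .addr_lo = 0x2000, .width = 64, .height = 64 };

	set_cursor_attributes(&hubp, &a);	/* cached only */
	hubp.cursor_offload = false;
	set_cursor_attributes(&hubp, &a);	/* cached and programmed */
	return 0;
}
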
@@ -1058,22 +1060,29 @@ void hubp2_cursor_set_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0;
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (cur_en && cursor_not_programmed)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en);
+ }
+
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
@@ -1674,6 +1683,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 7fd9240868c3..7062e6653062 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -145,7 +145,8 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0
+ uint32_t VMID_SETTINGS_0;\
+ uint32_t DST_Y_DELTA_DRQ_LIMIT
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
@@ -176,7 +177,10 @@
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
- uint32_t DCHUBP_MCACHEID_CONFIG
+ uint32_t DCHUBP_MCACHEID_CONFIG;\
+ uint32_t DCHUBP_MALL_SUB_VP;\
+ uint32_t DCHUBP_ADDR_CONFIG;\
+ uint32_t HUBP_MALL_STATUS
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -264,6 +268,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
@@ -280,9 +285,8 @@
type MCACHEID_MALL_PREF_1H_P0;\
type MCACHEID_MALL_PREF_2H_P0;\
type MCACHEID_MALL_PREF_1H_P1;\
- type MCACHEID_MALL_PREF_2H_P1
-
-
+ type MCACHEID_MALL_PREF_2H_P1;\
+ type HUBP_FGCG_REP_DIS
struct dcn_hubp2_registers {
DCN401_HUBP_REG_COMMON_VARIABLE_LIST;
@@ -382,7 +386,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
index d910e4a54c34..ec88ee424a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
@@ -42,7 +42,7 @@
static void hubp201_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.set_cursor_position = hubp1_cursor_set_position,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index edbdb8c88d5c..08ea0a1b9e7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -73,8 +73,6 @@
* On any mode switch, if the new reg values are smaller than the current values,
* then update the regs with the new values.
*
- * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
- *
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
@@ -811,6 +809,8 @@ static void hubp21_init(struct hubp *hubp)
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -823,6 +823,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp21_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 3b16c3cda2c3..0cc6f4558989 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -318,7 +318,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
@@ -411,7 +411,7 @@ void hubp3_dmdata_set_attributes(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp)
}
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG);
+ reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP);
+ reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG);
+ reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C);
+ reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG);
+ reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG);
+ reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION);
+ reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C);
+ reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START);
+ reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C);
+ reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION);
+ reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C);
+ reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START);
+ reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C);
+ reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG);
+ reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG);
+ reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL);
+ reg_state->mall_status = REG_READ(HUBP_MALL_STATUS);
+ reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK);
+ reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK);
+
+ reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0);
+ reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1);
+ reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS);
+ reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0);
+ reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1);
+ reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0);
+ reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1);
+ reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL);
+ reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE);
+ reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL);
+ reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0);
+ reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1);
+ reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0);
+ reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1);
+ reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM);
+ reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL);
+ reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR);
+ reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR);
+ reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+ reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2);
+ reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL);
+ reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE);
+ reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C);
+ reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH);
+ reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT);
+ reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE);
+ reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C);
+ reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH);
+ reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH);
+ reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C);
+ reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER);
+ reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS);
+ reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT);
+ reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0);
+ reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1);
+ reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2);
+ reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3);
+ reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4);
+ reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5);
+ reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6);
+ reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL);
+ reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS);
+ reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0);
+ reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1);
+ reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2);
+ reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3);
+ reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4);
+ reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5);
+ reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6);
+ reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7);
+ reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY);
+ reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE);
+ reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS);
+ reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C);
+ reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ);
+ reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+ reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0);
+ reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1);
+ reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2);
+ reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3);
+ reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4);
+ reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5);
+ reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6);
+ reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0);
+ reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL);
+ reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT);
+ reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL);
+ reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS);
+ reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0);
+ reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1);
+ reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS);
+ reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE);
+ reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0);
+ reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1);
+}
+
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -499,6 +619,10 @@ void hubp3_init(struct hubp *hubp)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn30_hubp_funcs = {
@@ -513,6 +637,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -529,6 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index cfb01bf340a1..c767e9f4f9b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -264,7 +265,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -280,7 +281,7 @@ void hubp3_setup(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp3_dcc_control(struct hubp *hubp, bool enable,
@@ -295,10 +296,17 @@ void hubp3_dmdata_set_attributes(
void hubp3_read_state(struct hubp *hubp);
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
+
void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 46b804ed05fb..189045f85039 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
- REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1);
}
void hubp31_soft_reset(struct hubp *hubp, bool reset)
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -79,6 +91,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -97,6 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index 8b5bd73b8094..a781085b046b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes(
uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
uint32_t cursor_height = attr->height;
uint32_t cursor_size = cursor_width * cursor_height;
-
- hubp->curs_attr = *attr;
-
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ bool use_mall_for_cursor;
switch (attr->color_format) {
case CURSOR_MODE_MONO:
@@ -158,16 +136,56 @@ void hubp32_cursor_set_attributes(
cursor_size *= 8;
break;
}
+ use_mall_for_cursor = cursor_size > 16384 ? 1 : 0;
+
+ hubp->curs_attr = *attr;
- if (cursor_size > 16384)
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
- else
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor);
+ }
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+ hubp->use_mall_for_cursor = use_mall_for_cursor;
}
void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -181,6 +199,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -203,6 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index eb62042dfafc..79c583e258c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -45,7 +45,7 @@ void hubp35_set_fgcg(struct hubp *hubp, bool enable)
REG_UPDATE(HUBP_CLK_CNTL, HUBP_FGCG_REP_DIS, !enable);
}
-static void hubp35_init(struct hubp *hubp)
+void hubp35_init(struct hubp *hubp)
{
hubp3_init(hubp);
@@ -172,7 +172,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -208,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
+ .hubp_read_reg_state = hubp3_read_reg_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp35_init,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
index 586b43aa5834..934836717f32 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
@@ -65,11 +65,12 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level);
+void hubp35_init(struct hubp *hubp);
#endif /* __DC_HUBP_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 09f730cfbf8e..f01eae50d02f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
+ REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
}
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = {(cfg->width == 17) ? 0 : 1};
+ uint32_t width = {cfg->width};
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ //cross bar
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -141,34 +178,48 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
void hubp401_init(struct hubp *hubp)
{
- //For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
+ hubp_reset(hubp);
}
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
- uint32_t value = 0;
+ unsigned int vstartup_lines = pipe_global_sync->dcn4x.vstartup_lines;
+ unsigned int vupdate_offset_pixels = pipe_global_sync->dcn4x.vupdate_offset_pixels;
+ unsigned int vupdate_width_pixels = pipe_global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int vready_offset_pixels = pipe_global_sync->dcn4x.vready_offset_pixels;
+ unsigned int htotal = timing->h_total;
+ unsigned int vblank_start = 0;
+ unsigned int vblank_end = 0;
+ unsigned int pixel_width = 0;
+ uint32_t reg_value = 0;
+ bool is_vready_at_or_after_vsync = false;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
/*
* if (VSTARTUP_START - (VREADY_OFFSET+VUPDATE_WIDTH+VUPDATE_OFFSET)/htotal) <= OTG_V_BLANK_END
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
* else
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if (pipe_dest->htotal != 0) {
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (htotal != 0) {
+ vblank_start = timing->v_total - timing->v_front_porch;
+ vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
+ pixel_width = vready_offset_pixels + vupdate_width_pixels + vupdate_offset_pixels;
+
+ is_vready_at_or_after_vsync = (vstartup_lines - pixel_width / htotal) <= vblank_end;
+
+ if (is_vready_at_or_after_vsync)
+ reg_value = 1;
}
- REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, reg_value);
}
void hubp401_program_requestor(
struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+ struct dml2_display_rq_regs *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -196,8 +247,8 @@ void hubp401_program_requestor(
void hubp401_program_deadline(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -294,66 +345,64 @@ void hubp401_program_deadline(
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
/* otg is locked when this func is called. Register are double buffered.
* disable the requestors is not needed
*/
- hubp401_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp401_program_requestor(hubp, rq_regs);
- hubp401_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
+ hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
+ hubp401_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ DST_Y_PREFETCH, pipe_regs->dlg_regs.dst_y_prefetch,
+ VRATIO_PREFETCH, pipe_regs->dlg_regs.vratio_prefetch);
REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ VRATIO_PREFETCH_C, pipe_regs->dlg_regs.vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+ DST_Y_PER_VM_VBLANK, pipe_regs->dlg_regs.dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, pipe_regs->dlg_regs.dst_y_per_row_vblank);
REG_SET_2(FLIP_PARAMETERS_0, 0,
- DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
- DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
+ DST_Y_PER_VM_FLIP, pipe_regs->dlg_regs.dst_y_per_vm_flip,
+ DST_Y_PER_ROW_FLIP, pipe_regs->dlg_regs.dst_y_per_row_flip);
REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+ REFCYC_PER_META_CHUNK_VBLANK_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+ REFCYC_PER_META_CHUNK_VBLANK_C, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_c);
REG_SET(FLIP_PARAMETERS_2, 0,
- REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
+ REFCYC_PER_META_CHUNK_FLIP_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_flip_l);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+ REFCYC_PER_LINE_DELIVERY_PRE_L, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ REFCYC_PER_REQ_DELIVERY_PRE, pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_cur0);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+ MIN_TTU_VBLANK, pipe_regs->ttu_regs.min_ttu_vblank,
+ QoS_LEVEL_FLIP, pipe_regs->ttu_regs.qos_level_flip);
}
@@ -532,7 +581,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
/* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x
@@ -580,7 +629,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -730,23 +779,27 @@ void hubp401_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
-
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, x_pos,
- CURSOR_Y_POSITION, y_pos);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+ }
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
@@ -981,11 +1034,12 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_surface_flip_and_addr = hubp401_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp401_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
- .hubp_setup = hubp401_setup,
- .hubp_setup_interdependent = hubp401_setup_interdependent,
+ .hubp_setup2 = hubp401_setup,
+ .hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp401_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp401_cursor_set_position,
@@ -1017,7 +1071,9 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
- .hubp_clear_tiling = hubp2_clear_tiling,
+ .hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 9b200a55bf9d..4570b8016de5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
@@ -252,33 +252,21 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
-void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-
-void hubp401_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs);
-
-void hubp401_program_deadline(
- struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
-
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
bool hubp401_program_surface_flip_and_addr(
struct hubp *hubp,
@@ -290,7 +278,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp401_program_size(
@@ -302,7 +290,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -347,7 +335,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
@@ -363,6 +351,23 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
+void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
+void hubp401_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs);
+
+void hubp401_program_deadline(
+ struct hubp *hubp,
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr);
+
#endif /* __DC_HUBP_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 40ecebea1ba0..bee617ca0838 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+HWSS_DCE60 = dce60_hwseq.o
+
+AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60)
+endif
+
+###############################################################################
+
+HWSS_DCE80 = dce80_hwseq.o
+
+AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
+
+###############################################################################
+
HWSS_DCE = dce_hwseq.o
AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE))
@@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120)
-###############################################################################
-
-HWSS_DCE80 = dce80_hwseq.o
-
-AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
index 84c8f8707c5d..f66a38f43a09 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
@@ -644,10 +644,18 @@ struct dce_hwseq_registers {
uint32_t DPP_TOP0_DPP_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
uint32_t DPP_TOP0_DPP_CRC_VAL_B_A;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_G;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_B;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_A;
uint32_t MPC_CRC_CTRL;
uint32_t MPC_CRC_RESULT_GB;
uint32_t MPC_CRC_RESULT_C;
uint32_t MPC_CRC_RESULT_AR;
+ uint32_t MPC_CRC_RESULT_R;
+ uint32_t MPC_CRC_RESULT_G;
+ uint32_t MPC_CRC_RESULT_B;
+ uint32_t MPC_CRC_RESULT_A;
uint32_t D1VGA_CONTROL;
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
@@ -1236,6 +1244,7 @@ struct dce_hwseq_registers {
type DOMAIN24_PGFSM_PWR_STATUS; \
type DOMAIN25_PGFSM_PWR_STATUS; \
type DOMAIN_DESIRED_PWR_STATE;
+
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
index f1f14796a3da..0d7e28260db1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
@@ -138,5 +138,35 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
+}
+
+/**
+ * dce100_reset_surface_dcc_and_tiling - Set DCC and tiling in DCE to their disabled mode.
+ *
+ * @pipe_ctx: Pointer to the pipe context structure.
+ * @plane_state: Surface state
+ * @clear_tiling: If true, set tiling to linear; otherwise, leave tiling unchanged
+ *
+ * This function is responsible for calling the memory input (MI) block to disable
+ * DCC and set tiling to linear mode.
+ */
+void dce100_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+
+ if (!mi)
+ return;
+
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && mi->funcs->mem_input_clear_tiling)
+ mi->funcs->mem_input_clear_tiling(mi);
+
+ /* force page flip to see the new content of the framebuffer */
+ mi->funcs->mem_input_program_surface_flip_and_addr(mi,
+ &plane_state->address,
+ true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
index 34518da20009..fadfa794f96b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
@@ -46,5 +46,9 @@ bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
+void dce100_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* __DC_HWSS_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 81f4c386c287..8fe399939220 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -33,6 +33,7 @@
#include "dce110_hwseq.h"
#include "dce110/dce110_timing_generator.h"
#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hwseq.h"
#include "gpio_service_interface.h"
#include "dce110/dce110_compressor.h"
@@ -47,7 +48,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -658,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
}
+static void
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_encoder_control encoder_control = {0};
+
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+
+ bios->funcs->encoder_control(bios, &encoder_control);
+}
+
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -687,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -951,8 +969,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t pwrseq_instance = 0;
- unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
- unsigned int post_T7_delay = OLED_POST_T7_DELAY;
+ unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
+ unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1065,9 +1083,11 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ if (!enable) {
+ /* follow the OEM panel config's requirement */
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
- msleep(pre_T11_delay);
+ if (pre_T11_delay)
+ msleep(pre_T11_delay);
}
}
@@ -1082,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1118,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx || !pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1152,9 +1178,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct dtbclk_dto_params dto_params = {0};
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1180,13 +1209,18 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- if (dccg && dccg->funcs->set_dtbclk_dto)
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) {
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1215,8 +1249,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
+ if (hws && hws->wa_state.skip_blank_stream)
+ return;
+
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- if (!link->skip_implict_edp_power_control)
+ if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
@@ -1258,7 +1295,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1278,7 +1315,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1370,7 +1407,7 @@ static void populate_audio_dp_link_info(
}
}
-static void build_audio_output(
+void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output)
@@ -1569,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+ uint8_t bit_depth;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ bit_depth = 0;
+ break;
+ case COLOR_DEPTH_666:
+ bit_depth = 6;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ bit_depth = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bit_depth = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bit_depth = 16;
+ break;
+ }
+
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
+
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -1588,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1654,9 +1740,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1669,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1677,6 +1762,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ /* Temporary workaround to perform DSC programming ahead of stream enablement
+ * for SmartMux/SPRS.
+ * TODO: Remove SmartMux/SPRS checks once the relocated DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, true);
+ }
+
if (!stream->dpms_off)
dc->link_srv->set_dpms_on(context, pipe_ctx);
@@ -1834,10 +1932,10 @@ static void clean_up_dsc_blocks(struct dc *dc)
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
int i;
- if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
- dc->ctx->dce_version != DCN_VERSION_3_51)
+ if (!dc->caps.is_apu ||
+ dc->ctx->dce_version < DCN_VERSION_3_15)
return;
-
+ /* VBIOS supports DSC starting from DCN 3.15 */
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
struct dcn_dsc_state s = {0};
@@ -1890,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -1902,10 +2001,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
- // Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
- dc->ctx->dce_version != DCE_VERSION_8_1 &&
- dc->ctx->dce_version != DCE_VERSION_8_3) {
+ /* Check fastboot support, disable on DCE 6-8 because of blank screens */
+ if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
if (edp_link != edp_streams[0]->link)
@@ -1918,6 +2015,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
edp_stream->sink, &edp_stream->timing);
+
+ // For mux platforms, the default value is false.
+ // Disable fast boot during mux switching;
+ // the flag is cleared after switching is done.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds)
+ can_apply_edp_fast_boot = false;
+
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
if (can_apply_edp_fast_boot) {
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
@@ -1961,6 +2065,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (edp_with_sink_num)
edp_link_with_sink = edp_links_with_sink[0];
+ // During a mux switch, powering down the HW blocks and then enabling
+ // the link via a DPCD SET_POWER write causes a brief flash
+ keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
+
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
@@ -1973,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
power_down_all_hw_blocks(dc);
/* DSC could be enabled on eDP during VBIOS post.
- * To clean up dsc blocks if eDP is in link but not active.
+ * Clean up DSC blocks if dpms_off is true for all eDP streams.
*/
- if (edp_link_with_sink && (edp_stream_num == 0))
+ for (i = 0; i < edp_stream_num; i++) {
+ if (!edp_streams[i]->dpms_off) {
+ should_clean_dsc_block = false;
+ }
+ }
+
+ if (should_clean_dsc_block)
clean_up_dsc_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
@@ -2104,8 +2218,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -2222,7 +2335,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
@@ -2761,12 +2874,12 @@ static void dce110_enable_per_frame_crtc_position_reset(
}
-static void init_pipes(struct dc *dc, struct dc_state *context)
+static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
// Do nothing
}
-static void init_hw(struct dc *dc)
+static void dce110_init_hw(struct dc *dc)
{
int i;
struct dc_bios *bp;
@@ -3325,12 +3438,13 @@ void dce110_disable_link_output(struct dc_link *link,
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
- .init_hw = init_hw,
+ .init_hw = dce110_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
+ .clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dce110_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
@@ -3368,7 +3482,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
};
static const struct hwseq_private_funcs dce110_private_funcs = {
- .init_pipes = init_pipes,
+ .init_pipes = dce110_init_pipes,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 06789ac3a224..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -110,5 +110,16 @@ void dce110_enable_dp_link_output(
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
+void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
index 22ee304ef9cf..2a62f63d0357 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
@@ -29,6 +29,7 @@
#include "dce120_hwseq.h"
#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hwseq.h"
#include "dce110/dce110_hwseq.h"
#include "dce/dce_12_0_offset.h"
@@ -264,5 +265,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
index 1fdeef47e4dc..a08e9f9eec17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
-#include "dce60_hw_sequencer.h"
+#include "dce60_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hwseq.h"
@@ -428,5 +428,6 @@ void dce60_hw_sequencer_construct(struct dc *dc)
dc->hwss.pipe_control_lock = dce60_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
index f3b2d8b60d5b..f3b2d8b60d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
index 0a054e880801..76fd45550c5e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
@@ -50,5 +50,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 681bb92c6069..fa62e40a9858 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -55,7 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+/*
+ * Delay until we have passed the busy-until point so that we can
+ * do the necessary locking/programming on consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+ // surface updates come in at high IRQL
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ // clear the wait state
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
+
+/*
+ * On pipe unlock and programming, indicate that the pipe will be busy
+ * until some frame and line (vupdate). This is required for consecutive
+ * full updates: we need to wait for the previous update to latch
+ * before trying to program the next one.
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(optc1->max_frame_count != 0);
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
+
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
@@ -205,6 +327,46 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
}
+ DTN_INFO("\n=======HUBP FL======\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+
+ if (!s->blank_en) {
+ uint32_t values[] = {
+ pool->hubps[i]->inst,
+ fl_regs->lut_enable,
+ fl_regs->lut_done,
+ fl_regs->lut_addr_mode,
+ fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
+ fl_regs->lut_tmz,
+ fl_regs->lut_crossbar_sel_r,
+ fl_regs->lut_crossbar_sel_g,
+ fl_regs->lut_crossbar_sel_b,
+ fl_regs->lut_addr_hi,
+ fl_regs->lut_addr_lo,
+ fl_regs->refcyc_3dlut_group,
+ fl_regs->lut_fl_bias,
+ fl_regs->lut_fl_scale,
+ fl_regs->lut_fl_mode,
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = 18;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
+ }
+
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
@@ -389,6 +551,60 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.num_3dluts,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
+ DTN_INFO("===== MPC RMCM 3DLUT =====\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = 19;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
+ }
+ DTN_INFO("\n");
+ DTN_INFO("===== MPC RMCM Shaper =====\n");
+ DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
+ s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
+ s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
+ s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
+ s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
+ }
}
void dcn10_log_hw_state(struct dc *dc,
@@ -415,7 +631,8 @@ void dcn10_log_hw_state(struct dc *dc,
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
/* Read shared OTG state registers for all DCNx */
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
/*
* For DCN2 and greater, a register on the OPP is used to
@@ -1112,9 +1329,7 @@ static void dcn10_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
@@ -1286,6 +1501,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1447,6 +1663,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1990,20 +2207,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
dc->hwss.get_position(&pipe_ctx, 1, &position);
vpos = position.vertical_count;
- /* Avoid wraparound calculation issues */
- vupdate_start += stream->timing.v_total;
- vupdate_end += stream->timing.v_total;
- vpos += stream->timing.v_total;
-
if (vpos <= vupdate_start) {
/* VPOS is in VACTIVE or back porch. */
lines_to_vupdate = vupdate_start - vpos;
- } else if (vpos > vupdate_end) {
- /* VPOS is in the front porch. */
- return;
} else {
- /* VPOS is in VUPDATE. */
- lines_to_vupdate = 0;
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
}
/* Calculate time until VUPDATE in microseconds. */
@@ -2011,13 +2219,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
us_to_vupdate = lines_to_vupdate * us_per_line;
+ /* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ /* Position is in the range of vupdate start and end */
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
/* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate > 70)
return;
- /* Stall out until the cursor update completes. */
- if (vupdate_end < vupdate_start)
- vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate);
}
@@ -2032,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -2667,7 +2880,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
@@ -2878,6 +3090,9 @@ static void dcn10_update_dchubp_dpp(
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -3135,7 +3350,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -3219,8 +3434,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -3429,52 +3643,6 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
hubbub->funcs->update_dchub(hubbub, dh_data);
}
-static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3498,6 +3666,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3516,7 +3686,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3533,6 +3703,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3574,35 +3748,23 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
- if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
@@ -3672,30 +3834,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
@@ -3967,3 +4116,32 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
dcc_en_bits[i] = s->dcc_en ? 1 : 0;
}
}
+
+/**
+ * dcn10_reset_surface_dcc_and_tiling - Set DCC and tiling in DCN to their disabled mode.
+ *
+ * @pipe_ctx: Pointer to the pipe context structure.
+ * @plane_state: Surface state
+ * @clear_tiling: If true, set tiling to linear; otherwise, leave tiling unchanged
+ *
+ * This function is responsible for calling the HUBP block to disable DCC and set
+ * tiling to linear mode.
+ */
+void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ if (!hubp)
+ return;
+
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && hubp->funcs->hubp_clear_tiling)
+ hubp->funcs->hubp_clear_tiling(hubp);
+
+ /* force page flip to see the new content of the framebuffer */
+ hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
+ &plane_state->address,
+ true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index bc5dd68a2408..57d30ea225f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -207,4 +214,8 @@ void dcn10_update_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
int mpcc_id);
+void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index 5e51e1761707..079c226c1097 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
@@ -40,6 +40,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.program_output_csc = dcn10_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b029ec1b26d3..c8ff8ae85a03 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -54,7 +54,7 @@
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -76,6 +76,7 @@ void dcn20_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
@@ -89,15 +90,15 @@ void dcn20_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s",
dpp->inst,
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
@@ -114,10 +115,17 @@ void dcn20_log_color_state(struct dc *dc,
(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 1) ? "RAM A" :
- ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"),
+ ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
- ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
- "SW"),
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
s.gamut_remap.temperature_matrix[0].value,
s.gamut_remap.temperature_matrix[1].value,
s.gamut_remap.temperature_matrix[2].value,
@@ -130,6 +138,8 @@ void dcn20_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -273,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock(
}
/* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
@@ -605,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
+
+ /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+ if (!power_on) {
+ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+ dpp5->funcs->dpp_force_disable_cursor(dpp5);
+ }
+
break;
default:
BREAK_TO_DEBUGGER();
@@ -946,15 +963,13 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1266,14 +1281,18 @@ static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
+ uint32_t org_ip_request_cntl = 0;
+
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (REG(DC_IP_REQUEST_CNTL)) {
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
@@ -1281,14 +1300,16 @@ static void dcn20_power_on_plane_resources(
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
}
}
-static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
@@ -1436,7 +1457,7 @@ void dcn20_pipe_control_lock(
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -1467,7 +1488,7 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+void dcn20_detect_pipe_changes(struct dc_state *old_state,
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe)
@@ -1655,7 +1676,7 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
}
}
-static void dcn20_update_dchubp_dpp(
+void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -1678,25 +1699,41 @@ static void dcn20_update_dchubp_dpp(
* VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
* VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
*/
+
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
- hubp->funcs->hubp_setup(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs,
- &pipe_ctx->rq_regs,
- &pipe_ctx->pipe_dlg_param);
+ if (hubp->funcs->hubp_setup2) {
+ hubp->funcs->hubp_setup2(
+ hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ } else {
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
}
if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
- if (pipe_ctx->update_flags.bits.hubp_interdependent)
- hubp->funcs->hubp_setup_interdependent(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2) {
+ hubp->funcs->hubp_setup_interdependent2(
+ hubp,
+ &pipe_ctx->hubp_regs);
+ } else {
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+ }
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
@@ -1756,15 +1793,17 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
- if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
- hubp->funcs->hubp_program_mcache_id_and_split_coordinate(
- hubp,
- &pipe_ctx->mcache_regs);
+
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, &pipe_ctx->mcache_regs);
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -1838,7 +1877,7 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->phantom_hubp_post_enable(hubp);
}
-static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+static int dcn20_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
@@ -1864,6 +1903,30 @@ static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
return vready_offset;
}
+static void dcn20_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -1874,33 +1937,17 @@ static void dcn20_program_pipe(
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.odm ||
- pipe_ctx->stream->update_flags.bits.abm_level)
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
- !pipe_ctx->plane_state ||
- !pipe_ctx->plane_state->visible);
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
- && !pipe_ctx->prev_odm_pipe) {
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout);
-
- if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
-
- if (hws->funcs.setup_vupdate_interrupt)
- hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
- }
+ && !pipe_ctx->prev_odm_pipe)
+ dcn20_program_tg(dc, pipe_ctx, context, hws);
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
@@ -1931,22 +1978,14 @@ static void dcn20_program_pipe(
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
- (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d ||
- pipe_ctx->update_flags.bits.enable))
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1954,10 +1993,8 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1966,7 +2003,7 @@ static void dcn20_program_pipe(
* causes a different pipe to be chosen to odm combine with.
*/
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->update_flags.bits.opp_changed) {
+ || pipe_ctx->update_flags.bits.opp_changed) {
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
@@ -1996,14 +2033,14 @@ static void dcn20_program_pipe(
memset(&params, 0, sizeof(params));
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- pipe_ctx->stream_res.test_pattern_params.test_pattern,
- pipe_ctx->stream_res.test_pattern_params.color_space,
- pipe_ctx->stream_res.test_pattern_params.color_depth,
- NULL,
- pipe_ctx->stream_res.test_pattern_params.width,
- pipe_ctx->stream_res.test_pattern_params.height,
- pipe_ctx->stream_res.test_pattern_params.offset);
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
}
}
@@ -2012,11 +2049,12 @@ void dcn20_program_front_end_for_ctx(
struct dc_state *context)
{
int i;
- struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
- struct pipe_ctx *pipe;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2025,11 +2063,11 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe, pipe->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2044,30 +2082,31 @@ void dcn20_program_front_end_for_ctx(
if (prev_hubp_count == 0 && hubp_count > 0) {
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, false);
+ dc->res_pool->hubbub, true, false);
udelay(500);
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
- &context->res_ctx.pipe_ctx[i]);
+ &context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
* buffer updates properly)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
- if (dc->hwseq->funcs.blank_pixel_data) {
+ if (dc->hwseq->funcs.blank_pixel_data)
dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
- }
+
tg->funcs->enable_crtc(tg);
}
}
@@ -2075,15 +2114,15 @@ void dcn20_program_front_end_for_ctx(
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- && !context->res_ctx.pipe_ctx[i].top_pipe
- && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
- && context->res_ctx.pipe_ctx[i].stream)
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
struct hubbub *hubbub = dc->res_pool->hubbub;
/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
@@ -2093,13 +2132,18 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i])
+ == SUBVP_PHANTOM))) {
if (hubbub->funcs->program_det_size)
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
if (dc->res_pool->hubbub->funcs->program_det_segments)
- dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
}
- hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2107,9 +2151,9 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- !resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->update_flags.bits.odm &&
- hws->funcs.update_odm)
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
@@ -2127,25 +2171,28 @@ void dcn20_program_front_end_for_ctx(
else {
/* Don't program phantom pipes in the regular front end programming sequence.
* There is an MPO transition case where a pipe being used by a video plane is
- * transitioned directly to be a phantom pipe when closing the MPO video. However
- * the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
- * but the MPO still exists until the double buffered update of the main pipe so we
- * will get a frame of underflow if the phantom pipe is programmed here.
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
*/
- if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
+
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
- && pipe->stream && pipe->stream->num_wb_info > 0
- && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->stream->update_flags.raw)
- && hws->funcs.program_all_writeback_pipes_in_tree)
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
@@ -2164,7 +2211,7 @@ void dcn20_program_front_end_for_ctx(
* buffered pending status clear and reset opp head pipe's none double buffered
* registers to their initial state.
*/
-static void post_unlock_reset_opp(struct dc *dc,
+void dcn20_post_unlock_reset_opp(struct dc *dc,
struct pipe_ctx *opp_head)
{
struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
@@ -2201,16 +2248,17 @@ void dcn20_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
- int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
- !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
- post_unlock_reset_opp(dc,
- &dc->current_state->res_ctx.pipe_ctx[i]);
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dcn20_post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -2226,11 +2274,12 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
udelay(polling_interval_us);
}
}
@@ -2244,15 +2293,14 @@ void dcn20_post_unlock_program_front_end(
* before we've transitioned to 2:1 or 4:1
*/
if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
int j = 0;
struct timing_generator *tg = pipe->stream_res.tg;
-
if (tg->funcs->get_optc_double_buffer_pending) {
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
udelay(polling_interval_us);
}
}
@@ -2260,7 +2308,7 @@ void dcn20_post_unlock_program_front_end(
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, false, false);
+ dc->res_pool->hubbub, false, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2291,11 +2339,11 @@ void dcn20_post_unlock_program_front_end(
return;
/* P-State support transitions:
- * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
- * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
- * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
- * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
- * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
@@ -2310,12 +2358,11 @@ void dcn20_post_unlock_program_front_end(
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-
/* WA for stutter underflow during MPO transitions when adding 2nd plane */
if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
if (dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ context->stream_status[0].plane_count > 1) {
struct timing_generator *tg = dc->res_pool->timing_generators[0];
@@ -2352,10 +2399,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2368,10 +2415,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -2445,7 +2492,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2463,7 +2510,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
@@ -2739,7 +2786,8 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (is_two_pixels_per_container || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
+ if (pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine)
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
@@ -2768,6 +2816,8 @@ void dcn20_reset_back_end_for_pipe(
{
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct dtbclk_dto_params dto_params = {0};
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -2816,9 +2866,7 @@ void dcn20_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
@@ -2830,6 +2878,13 @@ void dcn20_reset_back_end_for_pipe(
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg
+ && dc->ctx->dce_version >= DCN_VERSION_3_5) {
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
/*
@@ -2980,9 +3035,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk;
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
@@ -2994,7 +3052,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ } else {
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ }
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -3076,7 +3138,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
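As a distilled sketch (illustrative only, not part of this patch) of the capability-based dispatch introduced in dcn20_update_dchubp_dpp() above: prefer the new register-struct hooks (hubp_setup2 / hubp_setup_interdependent2) when the HUBP implementation provides them, and fall back to the legacy dlg/ttu/rq variants otherwise.

static void example_setup_hubp(struct hubp *hubp, struct pipe_ctx *pipe_ctx)
{
	/* sketch of the hubp_setup2-first dispatch shown in the hunk above */
	if (hubp->funcs->hubp_setup2)
		hubp->funcs->hubp_setup2(hubp,
					 &pipe_ctx->hubp_regs,
					 &pipe_ctx->global_sync,
					 &pipe_ctx->stream->timing);
	else
		hubp->funcs->hubp_setup(hubp,
					&pipe_ctx->dlg_regs,
					&pipe_ctx->ttu_regs,
					&pipe_ctx->rq_regs,
					&pipe_ctx->pipe_dlg_param);
}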
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index 5c874f7b0683..9d1ad3b29ca5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -154,6 +154,21 @@ void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable);
-
+void dcn20_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_post_unlock_reset_opp(
+ struct dc *dc,
+ struct pipe_ctx *opp_head);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 32707b344f0b..ad253c586ea1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -36,6 +36,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index 78351408e864..dec57fb4c05c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
@@ -36,6 +36,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn201_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 61efb15572ff..e2269211553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index e044e9e0a3a1..c7701a8b574a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
@@ -37,6 +37,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index e89ebfda4873..81bcadf5e57e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -50,10 +50,11 @@
#include "dpcd_defs.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
-
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
#define DC_LOGGER_INIT(logger)
@@ -74,6 +75,7 @@ void dcn30_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
@@ -88,16 +90,16 @@ void dcn30_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
- " %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s",
dpp->inst,
s.pre_dgam_mode,
(s.pre_dgam_select == 0) ? "sRGB" :
@@ -121,7 +123,14 @@ void dcn30_log_color_state(struct dc *dc,
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "RAM A" :
- "RAM B"),
+ "RAM B"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
@@ -137,6 +146,8 @@ void dcn30_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -1218,3 +1229,54 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (hubbub) {
+ if (hubbub->funcs->hubbub_read_reg_state) {
+ hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state);
+ }
+ }
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct output_pixel_processor *opp = dc->res_pool->opps[i];
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct timing_generator *optc = dc->res_pool->timing_generators[i];
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ if (hubp)
+ if (hubp->funcs->hubp_read_reg_state)
+ hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]);
+
+ if (dpp)
+ if (dpp->funcs->dpp_read_reg_state)
+ dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]);
+
+ if (opp)
+ if (opp->funcs->opp_read_reg_state)
+ opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]);
+
+ if (dsc)
+ if (dsc->funcs->dsc_read_reg_state)
+ dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]);
+
+ if (mpc)
+ if (mpc->funcs->mpc_read_reg_state)
+ mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]);
+
+ if (optc)
+ if (optc->funcs->optc_read_reg_state)
+ optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]);
+
+ if (dccg)
+ if (dccg->funcs->dccg_read_reg_state)
+ dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]);
+ }
+}
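As a caller sketch (illustrative only, not part of this patch): dcn30_get_underflow_debug_data() is wired up as the get_underflow_debug_data hwseq hook in the dcn3x init tables below, so a debug path could capture the register state roughly as follows; the function name and the kzalloc-based allocation are assumptions, and dc_underflow_debug_data is treated as an opaque output struct.

static void example_collect_underflow_state(struct dc *dc, struct timing_generator *tg)
{
	struct dc_underflow_debug_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return;

	if (dc->hwss.get_underflow_debug_data)
		dc->hwss.get_underflow_debug_data(dc, tg, data);

	/* ... log or copy out the captured HUBBUB/HUBP/DPP/OPP/DSC/MPC/OPTC/DCCG state ... */

	kfree(data);
}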
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index c32764aef884..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -37,6 +37,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -109,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index dcb27cdbce73..8d7ceb7b32b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -39,6 +39,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 03ba01f4ace1..d1ecdb92b072 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -45,7 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
@@ -396,6 +396,11 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -521,9 +526,15 @@ static void dcn31_reset_back_end_for_pipe(
link = pipe_ctx->stream->link;
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
+
if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
- (link->connector_signal == SIGNAL_TYPE_EDP))
+ (link->connector_signal == SIGNAL_TYPE_EDP)) {
dc->hwss.blank_stream(pipe_ctx);
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = true;
+ }
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
@@ -538,9 +549,7 @@ static void dcn31_reset_back_end_for_pipe(
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
@@ -553,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
+ /* Temporary workaround to perform DSC programming ahead of pipe reset
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
+ }
+
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
@@ -567,7 +589,8 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.audio = NULL;
}
}
-
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
@@ -616,7 +639,8 @@ void dcn31_reset_hw_ctx_wrap(
}
/* New dc_state in the process of being applied to hardware. */
- link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
}
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
@@ -686,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
+ if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX)
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index fb2ffb637931..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -40,6 +40,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -111,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index be26c925fdfa..4ee6ed610de0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
@@ -94,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -514,3 +529,75 @@ void dcn314_disable_link_output(struct dc_link *link,
apply_symclk_on_tx_off_wa(link);
}
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true to power the DPP instance on (ungate it), false to power gate it.
+ *
+ * Enable or disable power gate in the specific DPP instance.
+ * If power gating is disabled, the cursor in the DPP instance is force-disabled instead.
+ */
+void dcn314_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+ /* Workaround for DCN314 with disabled power gating */
+ if (!power_on) {
+
+ /* Force disable cursor if power gating is disabled */
+ struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp && dpp->funcs->dpp_force_disable_cursor)
+ dpp->funcs->dpp_force_disable_cursor(dpp);
+ }
+ return;
+ }
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
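For context (illustrative only, not part of this patch): dcn314_dpp_pg_control() is registered as the dpp_pg_control private-hwseq hook in the dcn314_init.c hunk below and is reached from the common plane power paths, e.g. the dcn20_power_on_plane_resources() hunk earlier in this diff:

	/* simplified call site, adapted from dcn20_power_on_plane_resources() */
	if (hws->funcs.dpp_pg_control)
		hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);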
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index 2305ad282f21..6c072d0274ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 21ef03a76229..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -42,6 +42,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -114,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
@@ -140,6 +142,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_pg_control = dcn314_dpp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 2e6ecf939fbd..bf19ba65d09a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -49,7 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32/dcn32_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
@@ -316,10 +316,12 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.cab_alloc_ways = (uint8_t)ways;
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ DC_LOG_MALL("enable scanout from MALL");
return true;
}
+ DC_LOG_MALL("surface cannot fit in CAB, disabling scanout from MALL\n");
return false;
}
@@ -1050,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -1059,17 +1061,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (!odm_dsc)
+ continue;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -1179,6 +1184,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
+ struct dce_hwseq *hws = stream->ctx->dc->hwseq;
two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1199,7 +1205,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+ hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1322,7 +1329,8 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.timing.pix_clk_100hz /= 2;
params.pix_per_cycle = 2;
}
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
+ if (pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine)
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.pix_per_cycle > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
@@ -1399,12 +1407,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
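
Note on the dcn32_hwseq.c hunks above: previously unconditional calls are now guarded behind optional function pointers (stream_enc->funcs->dp_set_odm_combine and hws->funcs.is_dp_dig_pixel_rate_div_policy). As a rough standalone illustration of that guard pattern only — the struct and function names below are hypothetical and not part of this patch — the k2 divider selection follows roughly this shape:

#include <stdbool.h>

/* Hypothetical hook table; a hook may be NULL on ASICs that do not implement it. */
struct example_hooks {
	bool (*is_div_policy)(int pipe);
};

static int pick_k2_div(const struct example_hooks *hooks, int odm_factor, int pipe)
{
	int k2_div = 4;	/* stands in for PIXEL_RATE_DIV_BY_4 */

	/* Call the hook only when it is populated, mirroring the
	 * hws->funcs.is_dp_dig_pixel_rate_div_policy check in the hunk above.
	 */
	if (odm_factor == 2 || (hooks->is_div_policy && hooks->is_div_policy(pipe)))
		k2_div = 2;	/* stands in for PIXEL_RATE_DIV_BY_2 */

	return k2_div;
}
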
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index e4d149eff10f..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -39,6 +39,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -120,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index d5f76cc69c73..7aa0f452e8f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -113,6 +113,14 @@ static void enable_memory_low_power(struct dc *dc)
}
#endif
+static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
+{
+ if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
+ if (dc->res_pool->pg_cntl->funcs->print_pg_status)
+ dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
+ }
+}
+
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
REG_UPDATE_3(DMU_CLK_CNTL,
@@ -137,6 +145,8 @@ void dcn35_init_hw(struct dc *dc)
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
+ print_pg_status(dc, __func__, ": start");
+
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -200,10 +210,7 @@ void dcn35_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
-/*
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
-*/
+
if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
/* If taking control over from VBIOS, we may want to optimize our first
@@ -236,15 +243,13 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
- if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ print_pg_status(dc, __func__, ": after init_pipes");
+
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
- for (i = 0; i < res_pool->pipe_count; i++)
- res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -316,6 +321,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
}
+ print_pg_status(dc, __func__, ": after init_pg_status");
}
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
@@ -358,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -504,97 +511,6 @@ void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_
}
}
-void dcn35_dsc_pg_control(
- struct dce_hwseq *hws,
- unsigned int dsc_inst,
- bool power_on)
-{
- uint32_t power_gate = power_on ? 0 : 1;
- uint32_t pwr_status = power_on ? 0 : 2;
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_dsc_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
-
- switch (dsc_inst) {
- case 0: /* DSC0 */
- REG_UPDATE(DOMAIN16_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN16_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 1: /* DSC1 */
- REG_UPDATE(DOMAIN17_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN17_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 2: /* DSC2 */
- REG_UPDATE(DOMAIN18_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN18_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 3: /* DSC3 */
- REG_UPDATE(DOMAIN19_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN19_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
-
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
-}
-
-void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
-{
- bool force_on = true; /* disable power gating */
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_hubp_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
- /* DCHUBP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- /* DPP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
- force_on = true; /* disable power gating */
- if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
- force_on = false;
-
- /* DCS0/1/2/3/4 */
- REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
-
-}
-
/* In headless boot cases, DIG may be turned
* on which causes HW/SW discrepancies.
* To avoid this, power down hardware on boot
@@ -800,6 +716,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -899,12 +816,15 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
-
+ /* make sure DPPCLK is on */
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -921,6 +841,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
@@ -937,6 +858,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
/* In flip immediate with pipe splitting case GSL is used for
@@ -953,9 +875,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
-/*to do, need to support both case*/
+
hubp->power_gated = true;
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream = NULL;
@@ -964,6 +887,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
+ //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
+
}
void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
@@ -1032,8 +957,22 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
- if (pipe_ctx->stream_res.dsc)
+ if (pipe_ctx->stream_res.dsc) {
update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
+ if (dc->caps.sequential_ono) {
+ update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
+ update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+ pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+ }
+ }
+ }
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
@@ -1178,6 +1117,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+ update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+ update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+ update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (new_pipe->plane_res.hubp &&
+ new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+ }
+ }
+ }
+
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
@@ -1410,6 +1368,8 @@ void dcn35_prepare_bandwidth(
}
dcn20_prepare_bandwidth(dc, context);
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_optimize_bandwidth(
@@ -1418,6 +1378,8 @@ void dcn35_optimize_bandwidth(
{
struct pg_block_update pg_update_state;
+ print_pg_status(dc, __func__, ": before rcg and power up");
+
dcn20_optimize_bandwidth(dc, context);
if (dc->hwss.calc_blocks_to_gate) {
@@ -1429,6 +1391,8 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
}
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
@@ -1465,8 +1429,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
num_frames = 2 * (frame_rate % 60);
}
}
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -1536,7 +1499,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
- if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations
@@ -1591,3 +1554,175 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return false;
}
+
+/*
+ * Set powerup to true for every pipe to match pre-OS configuration.
+ */
+static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
+{
+ int i = 0, j = 0;
+
+ memset(update_state, 0, sizeof(struct pg_block_update));
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
+ update_state->pg_pipe_res_update[j][i] = true;
+
+ update_state->pg_res_update[PG_HPO] = true;
+ update_state->pg_res_update[PG_DWB] = true;
+}
+
+/*
+ * The purpose is to power up all gated blocks to restore the pre-OS environment.
+ * Re-use the hwss functions and existing PG & RCG flags to decide the power-up sequence.
+ */
+void dcn35_hardware_release(struct dc *dc)
+{
+ struct pg_block_update pg_update_state;
+
+ dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);
+
+ if (dc->hwss.root_clock_control)
+ dc->hwss.root_clock_control(dc, &pg_update_state, true);
+ /* power up required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
+}
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ /*
+ * Insert a blank update to modify the write index and set pipe_mask to 0.
+ *
+ * While the DMU is interlocked with driver full pipe programming via
+ * the DMU HW lock, if the cursor update begins to execute after full
+ * pipe programming occurs, there are two possible issues:
+ *
+ * 1. Outdated cursor information is programmed, replacing the current update
+ * 2. The cursor update in firmware holds the cursor lock, preventing
+ * the current update from being latched atomically in the same frame
+ * as the rest of the update.
+ *
+ * This blank update, treated as a no-op, will allow the firmware to skip
+ * the programming.
+ */
+
+ if (dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe);
+
+ if (dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe);
+}
+
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = true;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = true;
+}
+
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = false;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = false;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
+ .data.cursor_offload_v1.offload_streams[stream_idx];
+
+ shared_stream->last_write_idx = write_idx;
+
+ cs->offload_streams[stream_idx].write_idx = write_idx;
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
+}
+
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
+}
+
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ dc_dmub_srv_program_cursor_now(dc, pipe);
+}
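
Note on the new dcn35 cursor-offload helpers above: they share a simple publish pattern. Begin selects the next payload slot from write_idx + 1 modulo the payload array size and stamps write_idx_start; update fills the per-pipe register snapshot and sets the pipe's bit in pipe_mask; commit publishes the new write_idx and stamps write_idx_finish to mark the payload complete. A minimal standalone sketch of that pattern follows — the names and array size are hypothetical, not the actual DMUB shared-state structures:

#include <stdint.h>

#define NUM_PAYLOADS 4	/* hypothetical; the real size comes from the DMUB layout */

struct payload {
	uint32_t write_idx_start;
	uint32_t write_idx_finish;
	uint32_t pipe_mask;
};

struct offload_stream {
	uint32_t write_idx;
	struct payload payloads[NUM_PAYLOADS];
};

static void begin_update(struct offload_stream *s)
{
	uint32_t next = s->write_idx + 1;	/* new payload (+1) */
	struct payload *p = &s->payloads[next % NUM_PAYLOADS];

	p->write_idx_start = next;	/* mark the slot as in progress */
	p->pipe_mask = 0;
}

static void update_pipe(struct offload_stream *s, unsigned int pipe_idx)
{
	uint32_t next = s->write_idx + 1;
	struct payload *p = &s->payloads[next % NUM_PAYLOADS];

	/* per-pipe cursor register data would be filled in here */
	p->pipe_mask |= 1u << pipe_idx;
}

static void commit_update(struct offload_stream *s)
{
	uint32_t next = s->write_idx + 1;
	struct payload *p = &s->payloads[next % NUM_PAYLOADS];

	s->write_idx = next;	/* publish the new write index */
	p->write_idx_finish = next;	/* mark the payload as complete */
}
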
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index e27b3609020f..1ff41dba556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -99,4 +99,14 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn35_hardware_release(struct dc *dc);
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 5ca8db2b2d03..5a66c9db2670 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -44,6 +44,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -85,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -114,7 +121,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
.hw_block_power_up = dcn35_hw_block_power_up,
@@ -122,6 +128,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .hardware_release = dcn35_hardware_release,
+ .detect_pipe_changes = dcn20_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -144,7 +156,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -159,9 +170,10 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 4f73e7f551ac..09e60158f0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -43,6 +43,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -113,7 +114,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn351_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate,
.hw_block_power_up = dcn351_hw_block_power_up,
@@ -122,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -144,7 +145,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -157,10 +157,11 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.set_mcm_luts = dcn32_set_mcm_luts,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 8cb0fbd301d8..2fbc22afb89c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2,7 +2,10 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "os_types.h"
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
@@ -22,10 +25,12 @@
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
-#include "link.h"
+#include "link_service.h"
+#include "custom_float.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
@@ -33,6 +38,7 @@
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
+#include "../hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -48,7 +54,7 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn401_initialize_min_clocks(struct dc *dc)
+void dcn401_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
@@ -126,91 +132,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct ips_ono_region_state state = {0, 0};
-
- switch (region) {
- case 0:
- /* dccg, dio, dcio */
- REG_GET_2(DOMAIN22_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 1:
- /* dchubbub, dchvm, dchubbubmem */
- REG_GET_2(DOMAIN23_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 2:
- /* mpc, opp, optc, dwb */
- REG_GET_2(DOMAIN24_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 3:
- /* hpo */
- REG_GET_2(DOMAIN25_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 4:
- /* dchubp0, dpp0 */
- REG_GET_2(DOMAIN0_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 5:
- /* dsc0 */
- REG_GET_2(DOMAIN16_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 6:
- /* dchubp1, dpp1 */
- REG_GET_2(DOMAIN1_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 7:
- /* dsc1 */
- REG_GET_2(DOMAIN17_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 8:
- /* dchubp2, dpp2 */
- REG_GET_2(DOMAIN2_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 9:
- /* dsc2 */
- REG_GET_2(DOMAIN18_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 10:
- /* dchubp3, dpp3 */
- REG_GET_2(DOMAIN3_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 11:
- /* dsc3 */
- REG_GET_2(DOMAIN19_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- default:
- break;
- }
-
- return state;
-}
-
void dcn401_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -227,13 +148,8 @@ void dcn401_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
- dc->caps.dcmode_power_limits_present =
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
+ dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
+ dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
// Initialize the dccg
@@ -287,6 +203,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -491,17 +410,16 @@ void dcn401_populate_mcm_luts(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format;
+ enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool is_17x17x17 = true;
bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
@@ -526,7 +444,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
/* Shaper */
- if (mcm_luts.shaper) {
+ if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.shaper->pwl;
@@ -538,11 +456,11 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->mcm.populate_lut)
+ mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
}
/* 3DLUT */
@@ -551,6 +469,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+
if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
if (mpc->funcs->populate_lut)
@@ -560,16 +479,35 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpcc_id);
}
break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ width = hubp_3dlut_fl_width_transformed;
+ break;
+ default:
+ //TODO: handle default case
+ break;
+ }
+
+ //check for support
+ if (mpc->funcs->mcm.is_config_supported &&
+ !mpc->funcs->mcm.is_config_supported(width))
+ break;
if (mpc->funcs->program_lut_read_write_control)
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
if (mpc->funcs->program_lut_mode)
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_3dlut_size)
- mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
+
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+
+ if (mpc->funcs->mcm.program_bit_depth)
+ mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
mode = hubp_3dlut_fl_mode_native_1;
@@ -596,7 +534,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- default:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
break;
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
@@ -608,11 +545,19 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
+ if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->mcm.program_bias_scale) {
+ mpc->funcs->mcm.program_bias_scale(mpc,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
+ }
+ // Navi 4x has a bug where the R and B components are swapped; work around it here.
+ // TODO: add a get_xbar method per ASIC, OR do the workaround in program_crossbar for 4x
switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
default:
@@ -624,21 +569,16 @@ void dcn401_populate_mcm_luts(struct dc *dc,
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b,
- crossbar_bit_slice_cr_r);
+ crossbar_bit_slice_cb_b);
+
+ if (mpc->funcs->mcm.program_lut_read_write_control)
+ mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+
+ if (mpc->funcs->mcm.program_3dlut_size)
+ mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- default:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_width)
- hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
@@ -876,20 +816,23 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
- /* if we are borrowing from hblank, h_addressable needs to be adjusted */
- if (dc->debug.enable_hblank_borrow)
- patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+ /* if we are padding, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow) {
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
+ patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
+ patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ }
pipe_ctx->stream_res.tg->funcs->program_timing(
- pipe_ctx->stream_res.tg,
- &patched_crtc_timing,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout,
- pipe_ctx->stream->signal,
- true);
+ pipe_ctx->stream_res.tg,
+ &patched_crtc_timing,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
+ pipe_ctx->stream->signal,
+ true);
for (i = 0; i < opp_cnt; i++) {
opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -914,10 +857,7 @@ enum dc_status dcn401_enable_stream_timing(
}
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
-
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
/* Event triggers and num frames initialized for DRR, but can be
* later updated for PSR use. Note DRR trigger events are generated
@@ -1011,17 +951,23 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
int dp_hpo_inst = 0;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
&tmds_div, &early_control);
if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
-
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ } else {
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ }
} else {
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
@@ -1056,52 +1002,6 @@ void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
-static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
if (cursor_width <= 128) {
@@ -1292,7 +1192,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
pos_cpy.x = (uint32_t)x_pos;
pos_cpy.y = (uint32_t)y_pos;
- if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
x_pos = pos_cpy.x - param.recout.x;
@@ -1487,30 +1387,30 @@ void dcn401_prepare_bandwidth(struct dc *dc,
false);
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
/* update timeout thresholds */
if (hubbub->funcs->program_arbiter) {
- dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, false);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
@@ -1529,9 +1429,9 @@ void dcn401_optimize_bandwidth(
/* enable fams2 if needed */
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
/* program dchubbub watermarks */
@@ -1570,14 +1470,17 @@ void dcn401_optimize_bandwidth(
}
}
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
/* use always for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
- if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
+ if (!dc->ctx || !dc->ctx->dmub_srv)
+ return;
+
+ if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
return;
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1587,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
{
- struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
- bool lock = params->fams2_global_control_lock_fast_params.lock;
+ struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
+ bool lock = params->dmub_hw_control_lock_fast_params.lock;
- if (params->fams2_global_control_lock_fast_params.is_required) {
+ if (params->dmub_hw_control_lock_fast_params.is_required) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1699,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
+static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ struct pipe_ctx *old_opp_heads[MAX_PIPES];
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+ int i;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ old_otg_master = NULL;
+ }
+
+ /* Process new DSC configuration if DSC is enabled */
+ if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
+ struct dc_stream_state *stream = otg_master->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int last_dsc_calc = 0;
+ bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
+
+ /* Count ODM pipes */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
+
+ /* Step 1: Set DTO DSCCLK for main DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ otg_master->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Step 2: Calculate and set DSC config for main DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
+
+ /* Step 3: Enable main DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, otg_master);
+
+ /* Step 4: Configure and enable ODM DSC blocks */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (!odm_pipe->stream_res.dsc)
+ continue;
+
+ /* Set DTO DSCCLK for ODM DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ odm_pipe->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Calculate and set DSC config for ODM DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
+
+ /* Enable ODM DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
+ }
+
+ /* Step 5: Configure DSC in timing generator */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
+ &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
+ } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
+ /* Disable DSC in OPTC */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
+
+ hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
+ }
+
+ /* Disable DSC for old pipes that no longer need it */
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
+ for (i = 0; i < old_opp_head_count; i++) {
+ old_pipe = old_opp_heads[i];
+ new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
+
+ /* If old pipe had DSC but new pipe doesn't, disable the old DSC */
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
+ /* Then disconnect DSC block */
+ hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
+ }
+ }
+ }
+}
+
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_inst[MAX_PIPES] = {0};
+ int opp_head_count;
+ int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
+ int i;
+
+ opp_head_count = resource_get_opp_heads_for_otg_master(
+ otg_master, &context->res_ctx, opp_heads);
+
+ for (i = 0; i < opp_head_count; i++)
+ opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
+
+ /* Add ODM combine/bypass operation to sequence */
+ if (opp_head_count > 1) {
+ hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
+ opp_head_count, odm_slice_width, last_odm_slice_width);
+ } else {
+ hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
+ }
+
+ /* Add OPP operations to sequence */
+ for (i = 0; i < opp_head_count; i++) {
+ /* Add OPP pipe clock control operation */
+ hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
+
+ /* Add OPP program left edge extra pixel operation */
+ hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
+ opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
+ }
+
+ /* Add DSC update operations to sequence */
+ dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
+
+ /* Add blank pixel data operation if needed */
+ if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(
+ dc, otg_master, true, seq_state);
+ }
+}
+
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -1728,20 +1768,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
@@ -1947,9 +1995,8 @@ void dcn401_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
@@ -2013,3 +2060,1986 @@ void dcn401_reset_hw_ctx_wrap(
}
}
}
+
+static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+ struct pipe_ctx *other_pipe;
+ unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
+
+ /* Always use the largest vready_offset of all connected pipes */
+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+
+ return vready_offset;
+}
+
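+/*
+ * Program the OTG for this pipe: apply the group's global sync parameters,
+ * wait for VACTIVE on non-phantom pipes so the update takes effect, then set
+ * the VTG params and, when available, the VUPDATE interrupt.
+ */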
+static void dcn401_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
+void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe)
+ dcn401_program_tg(dc, pipe_ctx, context, hws);
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (hws->funcs.enable_plane)
+ hws->funcs.enable_plane(dc, pipe_ctx, context);
+ else
+ dc->hwss.enable_plane(dc, pipe_ctx, context);
+
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
+ dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only
+	 * do gamma programming when powering on; an internal memcmp avoids
+	 * redundant updates on slave planes.
+	 */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+	/* If the pipe has been enabled or has a different opp, we
+	 * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+	 * causes a different pipe to be chosen to odm combine with.
+	 */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ dc->hwss.set_disp_pattern_generator(dc,
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+}
+
+/*
+ * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
+ *
+ * Instead of directly calling hardware programming functions, this function
+ * appends sequence steps to the provided block_sequence array that can later
+ * be executed as part of hwss_execute_sequence.
+ */
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
+ !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
+ seq_state);
+ }
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
+ /* Step 1: Program global sync */
+ hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ /* Step 2: Wait for VACTIVE state (if not phantom pipe) */
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ /* Step 3: Set VTG params */
+ hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ /* Step 4: Setup vupdate interrupt (if available) */
+ if (hws->funcs.setup_vupdate_interrupt)
+ dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm) {
+ if (hws->funcs.update_odm_sequence)
+ hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (dc->hwss.enable_plane_sequence)
+ dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size) {
+ hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments) {
+ hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw)) {
+
+ if (dc->hwss.update_dchubp_dpp_sequence)
+ dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+ hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable)) {
+
+ hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+ }
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only
+	 * do gamma programming when powering on; an internal memcmp avoids
+	 * redundant updates on slave planes.
+	 */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf) {
+ hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+ }
+
+	/* If the pipe has been enabled or has a different opp, we
+	 * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+	 * causes a different pipe to be chosen to odm combine with.
+	 */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
+
+ hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+
+ hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_opp,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ (struct tg_color){0},
+ false,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+
+}
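+/*
+ * A minimal usage sketch for the sequence-based path above, assuming the
+ * caller later hands the collected steps to hwss_execute_sequence (the exact
+ * initialization of block_sequence_state and the execute call's arguments are
+ * assumptions, not part of this change):
+ *
+ *	struct block_sequence_state seq_state = { 0 };
+ *
+ *	dcn401_program_pipe_sequence(dc, pipe_ctx, context, &seq_state);
+ *	hwss_execute_sequence(dc, ...);
+ */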
+
+void dcn401_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ unsigned int prev_hubp_count = 0;
+ unsigned int hubp_count = 0;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (resource_is_pipe_topology_changed(dc->current_state, context))
+ resource_log_pipe_topology_update(dc, context);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ if (pipe->plane_state->triplebuffer_flips)
+ BREAK_TO_DEBUGGER();
+
+					/* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ prev_hubp_count++;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ hubp_count++;
+ }
+
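+	/* Going from zero planes to at least one: force p-state disallow before the
+	 * first HUBP is brought up and give it time to take effect.
+	 */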
+ if (prev_hubp_count == 0 && hubp_count > 0) {
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, false);
+ udelay(500);
+ }
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc) {
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
+
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
+ * then we want to do the programming here (effectively it's being disabled). If we do
+ * the programming later the DET won't be updated until the OTG for the phantom pipe is
+ * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
+ * DET allocation.
+ */
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
+ SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ }
+
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else {
+ /* Don't program phantom pipes in the regular front end programming sequence.
+ * There is an MPO transition case where a pipe being used by a video plane is
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
+ */
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ dcn401_program_pipe(dc, pipe, context);
+ }
+
+ pipe = pipe->bottom_pipe;
+ }
+ }
+
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+		/* Avoid underflow by checking the pipe line read when adding a 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
+ }
+ }
+}
+
+void dcn401_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
+ unsigned int polling_interval_us = 1;
+ struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dc->hwss.post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+	/*
+	 * If we are enabling a pipe, we need to wait for the pending clear as this is a
+	 * critical part of the enable operation. Otherwise, DM may request an immediate
+	 * flip, which will cause HW to perform an "immediate enable" (as opposed to a
+	 * "vsync enable") that is unsupported on DCN.
+	 */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Don't check flip pending on phantom pipes
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* When going from a smaller ODM slice count to a larger one, we must ensure the
+		 * double buffered update completes before we return, so that we don't reduce
+		 * DISPCLK before we've transitioned to 2:1 or 4:1.
+		 */
+ if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ int j = 0;
+ struct timing_generator *tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+ }
+ }
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ /* Program phantom pipe here to prevent a frame of underflow in the MPO transition
+ * case (if a pipe being used for a video plane transitions to a phantom pipe, it
+ * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
+ * programming sequence).
+ */
+ while (pipe) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
+ dcn401_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (!hwseq)
+ return;
+
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq->funcs.update_force_pstate)
+ dc->hwseq->funcs.update_force_pstate(dc, context);
+ /* Only program the MALL registers after all the main and phantom pipes
+ * are done programming.
+ */
+ if (hwseq->funcs.program_mall_pipe_config)
+ hwseq->funcs.program_mall_pipe_config(dc, context);
+
+	/* WA to apply WM setting */
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
+
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
+ tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
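+/*
+ * Revalidate bandwidth for the given state and reprogram the affected pipes:
+ * global sync / VTG / blanking on top pipes, then the HUBP registers via
+ * hubp_setup2. Returns false if bandwidth validation fails.
+ */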
+bool dcn401_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* recalculate DML parameters */
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
+ return false;
+
+ /* apply updated bandwidth parameters */
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* update hubp configs for all pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe == NULL) {
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ }
+
+ return true;
+}
+
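+/*
+ * Compare the old and new pipe contexts and populate new_pipe->update_flags;
+ * these flags determine which programming steps dcn401_program_pipe (and its
+ * sequence-based variant) later perform for the pipe.
+ */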
+void dcn401_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
+{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
+ unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+
+ new_pipe->update_flags.raw = 0;
+
+ /* If non-phantom pipe is being transitioned to a phantom pipe,
+ * set disable and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+
+	/* For SubVP we need to unconditionally enable because any phantom pipes are
+	 * always removed and then newly added for every full update whenever SubVP is in use.
+	 * The remove-add sequence of the phantom pipe always results in the pipe
+	 * being blanked in enable_stream_timing (DPG).
+	 */
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
+ new_pipe->update_flags.bits.enable = 1;
+
+	/* Phantom pipes are effectively disabled; if the pipe was previously phantom,
+	 * we have to enable it.
+	 */
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
+ new_pipe->update_flags.bits.enable = 1;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state)
+ new_pipe->update_flags.bits.plane_changed = true;
+
+ /* Detect top pipe only changes */
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
+ /* Detect global sync changes */
+ if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
+ || (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
+ || (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
+ || (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
+
+	/*
+	 * Detect opp / tg change; only set on change, not on enable.
+	 * Assume mpcc inst == pipe index; if not, this code needs to be updated,
+	 * since mpcc is what is affected by these. In fact, the whole sequence
+	 * currently makes this assumption by matching the hubp reset to the
+	 * same-index mpcc reset.
+	 */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+ * Detect mpcc blending changes, only dpp inst and opp matter here,
+ * mpccs getting removed/inserted update connected ones during their own
+ * programming
+ */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
+ struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;
+
+ /* Detect pipe interdependent updates */
+ if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
+ || (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
+ || (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
+ || (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
+ || (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
+ || (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
+ || (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
+ || (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
+ new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
+ || (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
+ || (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
+ old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
+ old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
+ old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
+ old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
+ old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
+ old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
+ old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
+ old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
+ old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
+ memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
+ memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
+
+ if (memcmp(&old_pipe->stream_res.test_pattern_params,
+ &new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ }
+}
+
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
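+	/* Enable IP register access if it is not already enabled; the original
+	 * state is restored once power gating and the resets are done.
+	 */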
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ }
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ hubp->funcs->hubp_reset(hubp);
+ dpp->funcs->dpp_reset(dpp);
+
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ DC_LOG_DEBUG(
+ "Power gated front end %d\n", hubp->inst);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
+}
+
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
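+	/* Payloads for each stream form a small ring: stage this update into the
+	 * entry after the current write index and mark the pipe in its pipe_mask.
+	 */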
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+ p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Check and set DC_IP_REQUEST_CNTL if needed */
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+ }
+
+ /* DPP power gating control */
+ hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
+
+ /* HUBP power gating control */
+ hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
+
+ /* HUBP reset */
+ hwss_add_hubp_reset(seq_state, hubp);
+
+ /* DPP reset */
+ hwss_add_dpp_reset(seq_state, dpp);
+
+ /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
+
+ /* DPP root clock control */
+ hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
+}
+
+/* Trigger HW to start disconnecting the plane from the stream on the next vsync, using the block sequence */
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+
+	/* Already reset */
+ if (mpcc_to_remove == NULL)
+ return;
+
+ /* Step 1: Remove MPCC from MPC tree */
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
+
+ // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
+ // so don't wait for MPCC_IDLE in the programming sequence
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
+ /* Step 2: Set MPCC disconnect pending flag */
+ hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
+ }
+
+ /* Step 3: Set optimized required flag */
+ hwss_add_dc_set_optimized_required(seq_state, dc, true);
+
+ /* Step 4: Disconnect HUBP if function exists */
+ if (hubp->funcs->hubp_disconnect)
+ hwss_add_hubp_disconnect(seq_state, hubp);
+
+ /* Step 5: Verify pstate change high if debug sanity checks are enabled */
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ struct rect odm_slice_src;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (blank) {
+ /* Set ABM immediate disable */
+ hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ /* Set display pattern generator for all ODM pipes */
+ while (odm_pipe->next_odm_pipe) {
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ /* Set display pattern generator for final ODM pipe */
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ /* Handle ABM level setting when not blanking */
+ if (!blank) {
+ if (stream_res->abm) {
+ /* Set pipe for ABM */
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ /* Set ABM level */
+ hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
+ }
+ }
+}
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ int i_wb, i_pipe;
+
+ if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
+ return;
+
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+ /* Get direct pointer to writeback info */
+ struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
+ int mpcc_inst = -1;
+
+ if (wb_info->wb_enabled) {
+ /* Get the MPCC instance for writeback_source_plane */
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
+ mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+
+ if (mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ continue;
+ }
+
+ ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* Writeback pipe already enabled, only need to update */
+ dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ }
+ }
+}
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update DWBC with new parameters */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Configure MCIF_WB buffer settings */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+
+ /* Configure MCIF_WB arbitration */
+ hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+
+ /* Enable MCIF_WB */
+ hwss_add_mcif_wb_enable(seq_state, mcif_wb);
+
+ /* Set DWB MUX to connect writeback to MPCC */
+ hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
+
+ /* Enable DWBC */
+ hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
+}
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Disable DWBC */
+ hwss_add_dwbc_disable(seq_state, dwb);
+
+ /* Disable DWB MUX */
+ hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
+
+ /* Disable MCIF_WB */
+ hwss_add_mcif_wb_disable(seq_state, mcif_wb);
+}
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update writeback pipe */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Update MCIF_WB buffer settings if needed */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+}
+
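+/* Return a 1-based index of a free GSL group, or 0 if all groups are in use. */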
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
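+/*
+ * On enable, claim a free GSL group for the pipe and program the OTG as GSL
+ * master; on disable, release the group. The assignment is tracked in
+ * pipe_ctx->stream_res.gsl_group and dc->res_pool->gsl_groups.
+ */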
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+		/* Return if a group is already assigned, since GSL was already set up
+		 * for vsync flip; we unassign it on disable, so it can't be "left over".
+		 */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
+ hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+}
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ /* Wait for MPCC disconnect */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
+
+	/* In the immediate-flip with pipe-splitting case, GSL is used for
+	 * synchronization, so we must disable it when the plane is disabled.
+	 */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+
+ /* Update HUBP mall sel */
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
+ hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
+
+ /* Set flip control GSL */
+ hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* HUBP clock control */
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* DPP clock control */
+ hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
+
+ /* Plane atomic power down */
+ if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
+ dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp, seq_state);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled */
+ if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
+ hwss_add_disable_phantom_crtc(seq_state, tg);
+}
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state)
+{
+ struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+	/* Wait for all DPP pipes in the current MPC blending tree to complete their
+	 * double buffered disconnection before resetting the OPP.
+	 */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+ if (dsc) {
+ bool *is_ungated = NULL;
+ /* Check DSC power gate status */
+ if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+ hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+		/* Seamless-update specific: postpone the non double buffered DSCCLK
+		 * disable logic to the post-unlock sequence, after DSC has been
+		 * disconnected from the OPP but is not yet power gated.
+		 */
+
+ /* DSC wait disconnect pending clear */
+ hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+ /* DSC disable */
+ hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+ /* Set reference DSCCLK */
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+ }
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
+}
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
+ return;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+
+ /* Step 1: DPP root clock control - enable clock */
+ if (hws->funcs.dpp_root_clock_control)
+ hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 2: Enable DC IP request (if needed) */
+ if (hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+
+ /* Step 3: DPP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
+ hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 4: HUBP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
+ hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ /* Step 5: Disable DC IP request (restore state) */
+ if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ /* Step 6: HUBP clock control - enable DCFCLK */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
+
+ /* Step 7: HUBP initialization */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
+ hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
+
+ /* Step 8: OPP pipe clock control - enable */
+ if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
+ hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
+
+ /* Step 9: VM system aperture settings */
+ if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
+ hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
+ dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
+ }
+
+ /* Step 10: Flip interrupt setup */
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
+ hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
+ }
+}
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
+
+ if (!hubp || !dpp || !plane_state)
+ return;
+
+ /* Step 1: DPP DPPCLK control */
+ if (pipe_ctx->update_flags.bits.dppclk)
+ hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
+
+ /* Step 2: DCCG update DPP DTO */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* Step 3: HUBP VTG selection */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
+
+ /* Step 4: HUBP setup (choose setup2 or setup) */
+ if (hubp->funcs->hubp_setup2) {
+ hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync, &pipe_ctx->stream->timing);
+ } else if (hubp->funcs->hubp_setup) {
+ hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
+ }
+ }
+
+ /* Step 5: Set unbounded requesting */
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
+
+ /* Step 6: HUBP interdependent setup */
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2)
+ hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
+ else if (hubp->funcs->hubp_setup_interdependent)
+ hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
+ }
+
+ /* Step 7: DPP setup - input CSC and format setup */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
+
+ /* Step 8: DPP cursor matrix setup */
+ if (dpp->funcs->set_cursor_matrix) {
+ hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
+ &plane_state->cursor_csc_color_matrix);
+ }
+
+ /* Step 9: DPP program bias and scale */
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
+ }
+
+ /* Step 10: MPCC updates */
+ if (pipe_ctx->update_flags.bits.mpcc ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change) {
+
+		/* Prefer update_mpcc_sequence over a single MPC_UPDATE_MPCC step when it is implemented */
+ if (hws->funcs.update_mpcc_sequence)
+ hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ /* Step 11: DPP scaler setup */
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ /* Step 12: HUBP viewport programming */
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+ hwss_add_hubp_mem_program_viewport(seq_state, hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Step 13: HUBP program mcache if available */
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
+
+ /* Step 14: Cursor attribute setup */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+		pipe_ctx->update_flags.bits.scaler || viewport_changed) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+
+ hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
+
+ hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
+
+ /* Step 15: Cursor position setup */
+ hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
+
+ /* Step 16: Cursor SDR white level */
+ if (dc->hwss.set_cursor_sdr_white_level)
+ hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 17: Gamut remap and output CSC */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.gamut_remap ||
+ plane_state->update_flags.bits.gamut_remap_change ||
+ pipe_ctx->stream->update_flags.bits.out_csc) {
+
+ /* Gamut remap */
+ hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
+
+ /* Output CSC */
+ hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
+ }
+
+ /* Step 18: HUBP surface configuration */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hwss_add_hubp_program_surface_config(seq_state, hubp,
+ plane_state->format, &plane_state->tiling_info, size,
+ plane_state->rotation, &plane_state->dcc,
+ plane_state->horizontal_mirror, 0);
+ hubp->power_gated = false;
+ }
+
+ /* Step 19: Update plane address (with SubVP support) */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update) {
+
+ /* SubVP save surface address if needed */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
+ hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
+ &pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
+ }
+
+ /* Update plane address */
+ hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 20: HUBP set blank - enable plane */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_hubp_set_blank(seq_state, hubp, false);
+
+ /* Step 21: Phantom HUBP post enable */
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
+ hwss_add_phantom_hubp_post_enable(seq_state, hubp);
+}
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (!hubp || !pipe_ctx->plane_state)
+ return;
+
+ per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+
+ /* Initialize blend configuration */
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
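+	/* Select the alpha blend mode: per-pixel alpha, per-pixel alpha combined with global gain, or global alpha only */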
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+ /* MPCC instance is equal to HUBP instance */
+ mpcc_id = hubp->inst;
+
+ /* Step 1: Update blending if no full update needed */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+
+ /* Update blending configuration */
+ hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
+
+ /* Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* Step 2: Get existing MPCC for DPP */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+
+ /* Step 3: Remove MPCC if being used */
+ if (new_mpcc != NULL) {
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
+ } else {
+ /* Step 4: Assert MPCC idle (debug only) */
+ if (dc->debug.sanity_checks)
+ hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
+ }
+
+ /* Step 5: Insert new plane into MPC tree */
+ hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
+
+ /* Step 6: Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+
+ /* Step 7: Set HUBP OPP and MPCC IDs */
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ int mpcc_inst;
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
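+	/* For each MPCC with a pending disconnect: queue an idle check (when the OTG is enabled), clear the pending flag and blank its HUBP */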
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
+ }
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ if (hubp)
+ hwss_add_hubp_set_blank(seq_state, hubp, true);
+ }
+ }
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
+}
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // convert only when the multiplier is non-zero
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
+}
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ int i;
+ unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
+ bool cache_cursor = false;
+
+ // Don't force p-state disallow -- can't block dummy p-state
+
+	// Update the MALL_SEL register for each pipe (decomposed from the single update_mall_sel call)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+
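+			/* Convert the cursor pitch * height into an allocation size based on the cursor color format */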
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
+ cache_cursor = true;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
+ } else {
+				// MALL is only used when the allocation fits in cache and PSR, Stereo3D and TMZ are not in use
+ uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface) ? 2 : 0;
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
+ }
+ }
+ }
+
+ // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
+ hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
+ }
+ }
+}
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high)
+ return;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ /* Attempt hardware workaround force recovery */
+ dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
+ }
+}
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp;
+ unsigned int i;
+
+ if (!dc->debug.recovery_enabled)
+ return false;
+
+ /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
+
+ /* Step 3: Set HUBP_DISABLE=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 4: Set HUBP_DISABLE=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, false);
+ }
+ }
+
+ /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
+
+ /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, false);
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 28a513dfc005..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -9,6 +9,7 @@
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
+#include "hwss/hw_sequencer.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
@@ -63,8 +64,6 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
- uint8_t region);
void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
@@ -75,15 +74,17 @@ void dcn401_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
@@ -95,6 +96,118 @@ void dcn401_reset_back_end_for_pipe(
void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
+void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
+void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
+void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
+bool dcn401_update_bandwidth(struct dc *dc, struct dc_state *context);
+void dcn401_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
+void dcn401_initialize_min_clocks(struct dc *dc);
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable);
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index b30f665d98a6..162096ce0bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -9,6 +9,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
@@ -17,9 +18,10 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_hw = dcn401_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
- .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
- .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .post_unlock_program_front_end = dcn401_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -37,12 +39,13 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_plane_sequence = dcn401_disable_plane_sequence,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
- .update_bandwidth = dcn20_update_bandwidth,
+ .update_bandwidth = dcn401_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn31_set_static_screen_control,
@@ -52,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
@@ -59,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -94,50 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- .fams2_global_control_lock = dcn401_fams2_global_control_lock,
+ .dmub_hw_control_lock = dcn401_dmub_hw_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
- .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .detect_pipe_changes = dcn401_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .enable_plane_sequence = dcn401_enable_plane_sequence,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence,
.update_mpcc = dcn20_update_mpcc,
+ .update_mpcc_sequence = dcn401_update_mpcc_sequence,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
+ .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence,
.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
- .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .plane_atomic_power_down = dcn401_plane_atomic_power_down,
+ .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence,
.update_odm = dcn401_update_odm,
+ .update_odm_sequence = dcn401_update_odm_sequence,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
+ .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
.perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
+ .program_pipe_sequence = dcn401_program_pipe_sequence,
+ .dc_ip_request_cntl = dcn401_dc_ip_request_cntl,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a5bb10d7b160..8ed9eea40c56 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -31,6 +31,8 @@
#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "inc/core_status.h"
+#include "inc/hw/hw_shared.h"
+#include "dsc/dsc.h"
struct pipe_ctx;
struct dc_state;
@@ -46,6 +48,10 @@ struct dce_hwseq;
struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
+struct drr_params;
+struct dc_underflow_debug_data;
+struct dsc_optc_config;
+struct vm_system_aperture_param;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -60,7 +66,7 @@ struct pipe_control_lock_params {
};
struct set_flip_control_gsl_params {
- struct pipe_ctx *pipe_ctx;
+ struct hubp *hubp;
bool flip_immediate;
};
@@ -146,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params {
const struct pipe_ctx *top_pipe_to_program;
};
-struct fams2_global_control_lock_fast_params {
+struct dmub_hw_control_lock_fast_params {
struct dc *dc;
bool is_required;
bool lock;
};
+struct program_surface_config_params {
+ struct hubp *hubp;
+ enum surface_pixel_format format;
+ struct dc_tiling_info *tiling_info;
+ struct plane_size plane_size;
+ enum dc_rotation_angle rotation;
+ struct dc_plane_dcc_param *dcc;
+ bool horizontal_mirror;
+ int compat_level;
+};
+
+struct program_mcache_id_and_split_coordinate {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct program_cursor_update_now_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct hubp_wait_pipe_read_start_params {
+ struct hubp *hubp;
+};
+
+struct apply_update_flags_for_phantom_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct update_phantom_vp_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct set_odm_combine_params {
+ struct timing_generator *tg;
+ int opp_inst[MAX_PIPES];
+ int opp_head_count;
+ int odm_slice_width;
+ int last_odm_slice_width;
+};
+
+struct set_odm_bypass_params {
+ struct timing_generator *tg;
+ const struct dc_crtc_timing *timing;
+};
+
+struct opp_pipe_clock_control_params {
+ struct output_pixel_processor *opp;
+ bool enable;
+};
+
+struct opp_program_left_edge_extra_pixel_params {
+ struct output_pixel_processor *opp;
+ enum dc_pixel_encoding pixel_encoding;
+ bool is_otg_master;
+};
+
+struct dccg_set_dto_dscclk_params {
+ struct dccg *dccg;
+ int inst;
+ int num_slices_h;
+};
+
+struct dsc_set_config_params {
+ struct display_stream_compressor *dsc;
+ struct dsc_config *dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg;
+};
+
+struct dsc_enable_params {
+ struct display_stream_compressor *dsc;
+ int opp_inst;
+};
+
+struct tg_set_dsc_config_params {
+ struct timing_generator *tg;
+ struct dsc_optc_config *dsc_optc_cfg;
+ bool enable;
+};
+
+struct dsc_disconnect_params {
+ struct display_stream_compressor *dsc;
+};
+
+struct dsc_read_state_params {
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state *dsc_state;
+};
+
+struct dsc_calculate_and_set_config_params {
+ struct pipe_ctx *pipe_ctx;
+ struct dsc_optc_config dsc_optc_cfg;
+ bool enable;
+ int opp_cnt;
+};
+
+struct dsc_enable_with_opp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_tg_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct tg_program_global_sync_params {
+ struct timing_generator *tg;
+ int vready_offset;
+ unsigned int vstartup_lines;
+ unsigned int vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines;
+};
+
+struct tg_wait_for_state_params {
+ struct timing_generator *tg;
+ enum crtc_state state;
+};
+
+struct tg_set_vtg_params_params {
+ struct timing_generator *tg;
+ struct dc_crtc_timing *timing;
+ bool program_fp2;
+};
+
+struct tg_set_gsl_params {
+ struct timing_generator *tg;
+ struct gsl_params gsl;
+};
+
+struct tg_set_gsl_source_select_params {
+ struct timing_generator *tg;
+ int group_idx;
+ uint32_t gsl_ready_signal;
+};
+
+struct setup_vupdate_interrupt_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct tg_setup_vertical_interrupt2_params {
+ struct timing_generator *tg;
+ int start_line;
+};
+
+struct dpp_set_hdr_multiplier_params {
+ struct dpp *dpp;
+ uint32_t hw_mult;
+};
+
+struct program_det_size_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_buffer_size_kb;
+};
+
+struct program_det_segments_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_size;
+};
+
+struct update_dchubp_dpp_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct opp_set_dyn_expansion_params {
+ struct output_pixel_processor *opp;
+ enum dc_color_space color_space;
+ enum dc_color_depth color_depth;
+ enum signal_type signal;
+};
+
+struct opp_program_fmt_params {
+ struct output_pixel_processor *opp;
+ struct bit_depth_reduction_params *fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping;
+};
+
+struct opp_program_bit_depth_reduction_params {
+ struct output_pixel_processor *opp;
+ bool use_default_params;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct opp_set_disp_pattern_generator_params {
+ struct output_pixel_processor *opp;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ struct tg_color solid_color;
+ bool use_solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct set_abm_pipe_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_abm_level_params {
+ struct abm *abm;
+ unsigned int abm_level;
+};
+
+struct set_abm_immediate_disable_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_disp_pattern_generator_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ const struct tg_color *solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct mpc_update_blending_params {
+ struct mpc *mpc;
+ struct mpcc_blnd_cfg blnd_cfg;
+ int mpcc_id;
+};
+
+struct mpc_assert_idle_mpcc_params {
+ struct mpc *mpc;
+ int mpcc_id;
+};
+
+struct mpc_insert_plane_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg;
+ struct mpcc *insert_above_mpcc;
+ int dpp_id;
+ int mpcc_id;
+};
+
+struct mpc_remove_mpcc_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove;
+};
+
+struct opp_set_mpcc_disconnect_pending_params {
+ struct output_pixel_processor *opp;
+ int mpcc_inst;
+ bool pending;
+};
+
+struct dc_set_optimized_required_params {
+ struct dc *dc;
+ bool optimized_required;
+};
+
+struct hubp_disconnect_params {
+ struct hubp *hubp;
+};
+
+struct hubbub_force_pstate_change_control_params {
+ struct hubbub *hubbub;
+ bool enable;
+ bool wait;
+};
+
+struct tg_enable_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct hubp_wait_flip_pending_params {
+ struct hubp *hubp;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct tg_wait_double_buffer_pending_params {
+ struct timing_generator *tg;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct update_force_pstate_params {
+ struct dc *dc;
+ struct dc_state *context;
+};
+
+struct hubbub_apply_dedcn21_147_wa_params {
+ struct hubbub *hubbub;
+};
+
+struct hubbub_allow_self_refresh_control_params {
+ struct hubbub *hubbub;
+ bool allow;
+ bool *disallow_self_refresh_applied;
+};
+
+struct tg_get_frame_count_params {
+ struct timing_generator *tg;
+ unsigned int *frame_count;
+};
+
+struct mpc_set_dwb_mux_params {
+ struct mpc *mpc;
+ int dwb_id;
+ int mpcc_id;
+};
+
+struct mpc_disable_dwb_mux_params {
+ struct mpc *mpc;
+ unsigned int dwb_id;
+};
+
+struct mcif_wb_config_buf_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+ unsigned int dest_height;
+};
+
+struct mcif_wb_config_arb_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_arb_params *mcif_arb_params;
+};
+
+struct mcif_wb_enable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct mcif_wb_disable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct dwbc_enable_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct dwbc_disable_params {
+ struct dwbc *dwb;
+};
+
+struct dwbc_update_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct hubp_update_mall_sel_params {
+ struct hubp *hubp;
+ uint32_t mall_sel;
+ bool cache_cursor;
+};
+
+struct hubp_prepare_subvp_buffering_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_set_blank_en_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_disable_control_params {
+ struct hubp *hubp;
+ bool disable;
+};
+
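+/* Carries the hubbub soft-reset helper to invoke (e.g. hubbub1_soft_reset) and whether to assert or release the reset */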
+struct hubbub_soft_reset_params {
+ struct hubbub *hubbub;
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset);
+ bool reset;
+};
+
+struct hubp_clk_cntl_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_init_params {
+ struct hubp *hubp;
+};
+
+struct hubp_set_vm_system_aperture_settings_params {
+ struct hubp *hubp;
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct hubp_set_flip_int_params {
+ struct hubp *hubp;
+};
+
+struct dpp_dppclk_control_params {
+ struct dpp *dpp;
+ bool dppclk_div;
+ bool enable;
+};
+
+struct disable_phantom_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct dpp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool power_on;
+};
+
+struct hubp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int hubp_inst;
+ bool power_on;
+};
+
+struct hubp_reset_params {
+ struct hubp *hubp;
+};
+
+struct dpp_reset_params {
+ struct dpp *dpp;
+};
+
+struct dpp_root_clock_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool clock_on;
+};
+
+struct dc_ip_request_cntl_params {
+ struct dc *dc;
+ bool enable;
+};
+
+struct dsc_pg_status_params {
+ struct dce_hwseq *hws;
+ int dsc_inst;
+ bool is_ungated;
+};
+
+struct dsc_wait_disconnect_pending_clear_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dsc_disable_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dccg_set_ref_dscclk_params {
+ struct dccg *dccg;
+ int dsc_inst;
+ bool *is_ungated;
+};
+
+struct dccg_update_dpp_dto_params {
+ struct dccg *dccg;
+ int dpp_inst;
+ int dppclk_khz;
+};
+
+struct hubp_vtg_sel_params {
+ struct hubp *hubp;
+ uint32_t otg_inst;
+};
+
+struct hubp_setup2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+ union dml2_global_sync_programming *global_sync;
+ struct dc_crtc_timing *timing;
+};
+
+struct hubp_setup_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest;
+};
+
+struct hubp_set_unbounded_requesting_params {
+ struct hubp *hubp;
+ bool unbounded_req;
+};
+
+struct hubp_setup_interdependent2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+};
+
+struct hubp_setup_interdependent_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+};
+
+struct dpp_set_cursor_matrix_params {
+ struct dpp *dpp;
+ enum dc_color_space color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix;
+};
+
+struct mpc_update_mpcc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct dpp_set_scaler_params {
+ struct dpp *dpp;
+ const struct scaler_data *scl_data;
+};
+
+struct hubp_mem_program_viewport_params {
+ struct hubp *hubp;
+ const struct rect *viewport;
+ const struct rect *viewport_c;
+};
+
+struct hubp_program_mcache_id_and_split_coordinate_params {
+ struct hubp *hubp;
+ struct mcache_regs_struct *mcache_regs;
+};
+
+struct abort_cursor_offload_update_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_attribute_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_sdr_white_level_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_output_csc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum dc_color_space colorspace;
+ uint16_t *matrix;
+ int opp_id;
+};
+
+struct hubp_set_blank_params {
+ struct hubp *hubp;
+ bool blank;
+};
+
+struct phantom_hubp_post_enable_params {
+ struct hubp *hubp;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -171,7 +752,108 @@ union block_sequence_params {
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
- struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
+ struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params;
+ struct program_surface_config_params program_surface_config_params;
+ struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate;
+ struct program_cursor_update_now_params program_cursor_update_now_params;
+ struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params;
+ struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params;
+ struct update_phantom_vp_position_params update_phantom_vp_position_params;
+ struct set_odm_combine_params set_odm_combine_params;
+ struct set_odm_bypass_params set_odm_bypass_params;
+ struct opp_pipe_clock_control_params opp_pipe_clock_control_params;
+ struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params;
+ struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params;
+ struct dsc_set_config_params dsc_set_config_params;
+ struct dsc_enable_params dsc_enable_params;
+ struct tg_set_dsc_config_params tg_set_dsc_config_params;
+ struct dsc_disconnect_params dsc_disconnect_params;
+ struct dsc_read_state_params dsc_read_state_params;
+ struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params;
+ struct dsc_enable_with_opp_params dsc_enable_with_opp_params;
+ struct program_tg_params program_tg_params;
+ struct tg_program_global_sync_params tg_program_global_sync_params;
+ struct tg_wait_for_state_params tg_wait_for_state_params;
+ struct tg_set_vtg_params_params tg_set_vtg_params_params;
+ struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params;
+ struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params;
+ struct tg_set_gsl_params tg_set_gsl_params;
+ struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params;
+ struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params;
+ struct program_det_size_params program_det_size_params;
+ struct program_det_segments_params program_det_segments_params;
+ struct update_dchubp_dpp_params update_dchubp_dpp_params;
+ struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params;
+ struct opp_program_fmt_params opp_program_fmt_params;
+ struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params;
+ struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params;
+ struct set_abm_pipe_params set_abm_pipe_params;
+ struct set_abm_level_params set_abm_level_params;
+ struct set_abm_immediate_disable_params set_abm_immediate_disable_params;
+ struct set_disp_pattern_generator_params set_disp_pattern_generator_params;
+ struct mpc_remove_mpcc_params mpc_remove_mpcc_params;
+ struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params;
+ struct dc_set_optimized_required_params dc_set_optimized_required_params;
+ struct hubp_disconnect_params hubp_disconnect_params;
+ struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params;
+ struct tg_enable_crtc_params tg_enable_crtc_params;
+ struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params;
+ struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params;
+ struct update_force_pstate_params update_force_pstate_params;
+ struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params;
+ struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params;
+ struct tg_get_frame_count_params tg_get_frame_count_params;
+ struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params;
+ struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params;
+ struct mcif_wb_config_buf_params mcif_wb_config_buf_params;
+ struct mcif_wb_config_arb_params mcif_wb_config_arb_params;
+ struct mcif_wb_enable_params mcif_wb_enable_params;
+ struct mcif_wb_disable_params mcif_wb_disable_params;
+ struct dwbc_enable_params dwbc_enable_params;
+ struct dwbc_disable_params dwbc_disable_params;
+ struct dwbc_update_params dwbc_update_params;
+ struct hubp_update_mall_sel_params hubp_update_mall_sel_params;
+ struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params;
+ struct hubp_set_blank_en_params hubp_set_blank_en_params;
+ struct hubp_disable_control_params hubp_disable_control_params;
+ struct hubbub_soft_reset_params hubbub_soft_reset_params;
+ struct hubp_clk_cntl_params hubp_clk_cntl_params;
+ struct hubp_init_params hubp_init_params;
+ struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params;
+ struct hubp_set_flip_int_params hubp_set_flip_int_params;
+ struct dpp_dppclk_control_params dpp_dppclk_control_params;
+ struct disable_phantom_crtc_params disable_phantom_crtc_params;
+ struct dpp_pg_control_params dpp_pg_control_params;
+ struct hubp_pg_control_params hubp_pg_control_params;
+ struct hubp_reset_params hubp_reset_params;
+ struct dpp_reset_params dpp_reset_params;
+ struct dpp_root_clock_control_params dpp_root_clock_control_params;
+ struct dc_ip_request_cntl_params dc_ip_request_cntl_params;
+ struct dsc_pg_status_params dsc_pg_status_params;
+ struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params;
+ struct dsc_disable_params dsc_disable_params;
+ struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params;
+ struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params;
+ struct hubp_vtg_sel_params hubp_vtg_sel_params;
+ struct hubp_setup2_params hubp_setup2_params;
+ struct hubp_setup_params hubp_setup_params;
+ struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params;
+ struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params;
+ struct hubp_setup_interdependent_params hubp_setup_interdependent_params;
+ struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params;
+ struct mpc_update_mpcc_params mpc_update_mpcc_params;
+ struct mpc_update_blending_params mpc_update_blending_params;
+ struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params;
+ struct mpc_insert_plane_params mpc_insert_plane_params;
+ struct dpp_set_scaler_params dpp_set_scaler_params;
+ struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
+ struct abort_cursor_offload_update_params abort_cursor_offload_update_params;
+ struct set_cursor_attribute_params set_cursor_attribute_params;
+ struct set_cursor_position_params set_cursor_position_params;
+ struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
+ struct program_output_csc_params program_output_csc_params;
+ struct hubp_set_blank_params hubp_set_blank_params;
+ struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params;
};
enum block_sequence_func {
@@ -187,14 +869,113 @@ enum block_sequence_func {
DPP_SETUP_DPP,
DPP_PROGRAM_BIAS_AND_SCALE,
DPP_SET_OUTPUT_TRANSFER_FUNC,
+ DPP_SET_HDR_MULTIPLIER,
MPC_UPDATE_VISUAL_CONFIRM,
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
- DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
-
+ DMUB_HW_CONTROL_LOCK_FAST,
+ HUBP_PROGRAM_SURFACE_CONFIG,
+ HUBP_PROGRAM_MCACHE_ID,
+ PROGRAM_CURSOR_UPDATE_NOW,
+ HUBP_WAIT_PIPE_READ_START,
+ HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM,
+ HWS_UPDATE_PHANTOM_VP_POSITION,
+ OPTC_SET_ODM_COMBINE,
+ OPTC_SET_ODM_BYPASS,
+ OPP_PIPE_CLOCK_CONTROL,
+ OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL,
+ DCCG_SET_DTO_DSCCLK,
+ DSC_SET_CONFIG,
+ DSC_ENABLE,
+ TG_SET_DSC_CONFIG,
+ DSC_DISCONNECT,
+ DSC_READ_STATE,
+ DSC_CALCULATE_AND_SET_CONFIG,
+ DSC_ENABLE_WITH_OPP,
+ TG_PROGRAM_GLOBAL_SYNC,
+ TG_WAIT_FOR_STATE,
+ TG_SET_VTG_PARAMS,
+ TG_SETUP_VERTICAL_INTERRUPT2,
+ HUBP_PROGRAM_DET_SIZE,
+ HUBP_PROGRAM_DET_SEGMENTS,
+ OPP_SET_DYN_EXPANSION,
+ OPP_PROGRAM_FMT,
+ OPP_PROGRAM_BIT_DEPTH_REDUCTION,
+ OPP_SET_DISP_PATTERN_GENERATOR,
+ ABM_SET_PIPE,
+ ABM_SET_LEVEL,
+ ABM_SET_IMMEDIATE_DISABLE,
+ MPC_REMOVE_MPCC,
+ OPP_SET_MPCC_DISCONNECT_PENDING,
+ DC_SET_OPTIMIZED_REQUIRED,
+ HUBP_DISCONNECT,
+ HUBBUB_FORCE_PSTATE_CHANGE_CONTROL,
+ TG_ENABLE_CRTC,
+ TG_SET_GSL,
+ TG_SET_GSL_SOURCE_SELECT,
+ HUBP_WAIT_FLIP_PENDING,
+ TG_WAIT_DOUBLE_BUFFER_PENDING,
+ UPDATE_FORCE_PSTATE,
+ PROGRAM_MALL_PIPE_CONFIG,
+ HUBBUB_APPLY_DEDCN21_147_WA,
+ HUBBUB_ALLOW_SELF_REFRESH_CONTROL,
+ TG_GET_FRAME_COUNT,
+ MPC_SET_DWB_MUX,
+ MPC_DISABLE_DWB_MUX,
+ MCIF_WB_CONFIG_BUF,
+ MCIF_WB_CONFIG_ARB,
+ MCIF_WB_ENABLE,
+ MCIF_WB_DISABLE,
+ DWBC_ENABLE,
+ DWBC_DISABLE,
+ DWBC_UPDATE,
+ HUBP_UPDATE_MALL_SEL,
+ HUBP_PREPARE_SUBVP_BUFFERING,
+ HUBP_SET_BLANK_EN,
+ HUBP_DISABLE_CONTROL,
+ HUBBUB_SOFT_RESET,
+ HUBP_CLK_CNTL,
+ HUBP_INIT,
+ HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS,
+ HUBP_SET_FLIP_INT,
+ DPP_DPPCLK_CONTROL,
+ DISABLE_PHANTOM_CRTC,
+ DSC_PG_STATUS,
+ DSC_WAIT_DISCONNECT_PENDING_CLEAR,
+ DSC_DISABLE,
+ DCCG_SET_REF_DSCCLK,
+ DPP_PG_CONTROL,
+ HUBP_PG_CONTROL,
+ HUBP_RESET,
+ DPP_RESET,
+ DPP_ROOT_CLOCK_CONTROL,
+ DC_IP_REQUEST_CNTL,
+ DCCG_UPDATE_DPP_DTO,
+ HUBP_VTG_SEL,
+ HUBP_SETUP2,
+ HUBP_SETUP,
+ HUBP_SET_UNBOUNDED_REQUESTING,
+ HUBP_SETUP_INTERDEPENDENT2,
+ HUBP_SETUP_INTERDEPENDENT,
+ DPP_SET_CURSOR_MATRIX,
+ MPC_UPDATE_BLENDING,
+ MPC_ASSERT_IDLE_MPCC,
+ MPC_INSERT_PLANE,
+ DPP_SET_SCALER,
+ HUBP_MEM_PROGRAM_VIEWPORT,
+ ABORT_CURSOR_OFFLOAD_UPDATE,
+ SET_CURSOR_ATTRIBUTE,
+ SET_CURSOR_POSITION,
+ SET_CURSOR_SDR_WHITE_LEVEL,
+ PROGRAM_OUTPUT_CSC,
+ HUBP_SET_LEGACY_TILING_COMPAT_LEVEL,
+ HUBP_SET_BLANK,
+ PHANTOM_HUBP_POST_ENABLE,
+ /* This must be the last value in this enum, add new ones above */
+ HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
struct block_sequence {
@@ -202,6 +983,13 @@ struct block_sequence {
enum block_sequence_func func;
};
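+/* Tracks an in-progress block sequence: the steps array being appended to and a pointer to its running step count */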
+struct block_sequence_state {
+ struct block_sequence *steps;
+ unsigned int *num_steps;
+};
+
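+/* Sized so every block sequence function type can be queued once per pipe */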
+#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
+
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
/* Embedded Display Related */
@@ -217,6 +1005,8 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -234,6 +1024,10 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
@@ -241,6 +1035,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
+ void (*clear_surface_dcc_and_tiling)(struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state, bool clear_tiling);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -304,6 +1099,13 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+ void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe);
/* Colour Related */
void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
@@ -446,18 +1248,45 @@ struct hw_sequencer_funcs {
const struct dc_state *new_ctx);
void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
- void (*fams2_global_control_lock)(struct dc *dc,
+ void (*dmub_hw_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
void (*fams2_update_config)(struct dc *dc,
struct dc_state *context,
bool enable);
- void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
+ void (*dmub_hw_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx);
+ void (*detect_pipe_changes)(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+ void (*enable_plane)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*enable_plane_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+ void (*update_dchubp_dpp)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*update_dchubp_dpp_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+ void (*post_unlock_reset_opp)(struct dc *dc,
+ struct pipe_ctx *opp_head);
+ void (*post_unlock_reset_opp_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
void color_space_to_black_color(
@@ -485,11 +1314,15 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color);
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
@@ -504,19 +1337,29 @@ void get_cursor_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params);
+
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps);
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -553,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params);
void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+void hwss_program_surface_config(union block_sequence_params *params);
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params);
+
+void hwss_set_odm_combine(union block_sequence_params *params);
+
+void hwss_set_odm_bypass(union block_sequence_params *params);
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params);
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params);
+
+void hwss_blank_pixel_data(union block_sequence_params *params);
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params);
+
+void hwss_dsc_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable(union block_sequence_params *params);
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params);
+
+void hwss_dsc_disconnect(union block_sequence_params *params);
+
+void hwss_dsc_read_state(union block_sequence_params *params);
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params);
+
+void hwss_program_tg(union block_sequence_params *params);
+
+void hwss_tg_program_global_sync(union block_sequence_params *params);
+
+void hwss_tg_wait_for_state(union block_sequence_params *params);
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params);
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params);
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params);
+
+void hwss_program_det_size(union block_sequence_params *params);
+
+void hwss_program_det_segments(union block_sequence_params *params);
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params);
+
+void hwss_opp_program_fmt(union block_sequence_params *params);
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params);
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params);
+
+void hwss_set_abm_pipe(union block_sequence_params *params);
+
+void hwss_set_abm_level(union block_sequence_params *params);
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params);
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params);
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params);
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params);
+
+void hwss_hubp_disconnect(union block_sequence_params *params);
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params);
+
+void hwss_tg_enable_crtc(union block_sequence_params *params);
+
+void hwss_tg_set_gsl(union block_sequence_params *params);
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params);
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params);
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params);
+
+void hwss_update_force_pstate(union block_sequence_params *params);
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params);
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params);
+
+void hwss_tg_get_frame_count(union block_sequence_params *params);
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params);
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params);
+
+void hwss_mcif_wb_enable(union block_sequence_params *params);
+
+void hwss_mcif_wb_disable(union block_sequence_params *params);
+
+void hwss_dwbc_enable(union block_sequence_params *params);
+
+void hwss_dwbc_disable(union block_sequence_params *params);
+
+void hwss_dwbc_update(union block_sequence_params *params);
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params);
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params);
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params);
+
+void hwss_hubp_disable_control(union block_sequence_params *params);
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params);
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params);
+
+void hwss_hubp_init(union block_sequence_params *params);
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params);
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params);
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params);
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params);
+
+void hwss_dsc_pg_status(union block_sequence_params *params);
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params);
+
+void hwss_dsc_disable(union block_sequence_params *params);
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params);
+
+void hwss_dpp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_reset(union block_sequence_params *params);
+
+void hwss_dpp_reset(union block_sequence_params *params);
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params);
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params);
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params);
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params);
+
+void hwss_hubp_setup2(union block_sequence_params *params);
+
+void hwss_hubp_setup(union block_sequence_params *params);
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params);
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params);
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_update_blending(union block_sequence_params *params);
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_insert_plane(union block_sequence_params *params);
+
+void hwss_dpp_set_scaler(union block_sequence_params *params);
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params);
+
+void hwss_set_cursor_attribute(union block_sequence_params *params);
+
+void hwss_set_cursor_position(union block_sequence_params *params);
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params);
+
+void hwss_program_output_csc(union block_sequence_params *params);
+
+void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params);
+
+void hwss_hubp_set_blank(union block_sequence_params *params);
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params);
+
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock);
+
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp, bool flip_immediate);
+
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state);
+
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream);
+
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id);
+
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int mpcc_id, bool power_on);
+
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index);
+
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *top_pipe_to_program);
+
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width);
+
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *optc, struct dc_crtc_timing *timing);
+
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines);
+
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, enum crtc_state state);
+
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
+
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line);
+
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult);
+
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb);
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, bool enable, bool wait);
+
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size);
+
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth, enum signal_type signal);
+
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm, uint32_t abm_level);
+
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h);
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt);
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove);
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending);
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc);
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required);
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset);
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable);
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height);
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params);
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id);
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb);
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl);
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor);
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable);
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset);
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable);
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated);
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated);
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank);
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp);
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable);
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high);
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz);
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst);
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing);
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req);
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs);
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level);
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix);
+
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id);
+
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id);
+
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context);
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub);
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied);
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count);
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dsc_optc_config *dsc_optc_cfg,
+ bool enable);
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master);
+
#endif /* __DC_HW_SEQUENCER_H__ */
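
For readers tracing how the hwss_add_* helpers above are meant to be consumed, here is a minimal sketch that queues a few steps and then flushes them with hwss_execute_sequence(). It is illustrative only: the layout of struct block_sequence_state is not shown in this hunk, so the block_sequence and num_steps members used below are assumed names, and example_build_and_run_sequence() is not a function added by this patch.

/* Sketch only: block_sequence_state member names below are assumed. */
static void example_build_and_run_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct block_sequence_state seq_state = {0};

	/* Queue steps instead of programming the hardware immediately. */
	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, true);
	hwss_add_hubp_set_flip_int(&seq_state, pipe_ctx->plane_res.hubp);
	hwss_add_dpp_program_gamut_remap(&seq_state, pipe_ctx);
	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, false);

	/* Flush the queued steps in order (assumed member names). */
	hwss_execute_sequence(dc, seq_state.block_sequence, seq_state.num_steps);
}
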
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 22a5d4a03c98..406db231bc72 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -27,6 +27,7 @@
#define __DC_HW_SEQUENCER_PRIVATE_H__
#include "dc_types.h"
+#include "hw_sequencer.h"
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
@@ -49,6 +50,7 @@ struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
bool disallow_self_refresh_during_multi_plane_transition_applied;
unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
+ bool skip_blank_stream;
};
struct pipe_ctx;
@@ -79,7 +81,13 @@ struct hwseq_private_funcs {
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect_sequence)(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*set_input_transfer_func)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -96,6 +104,10 @@ struct hwseq_private_funcs {
void (*blank_pixel_data)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank);
+ void (*blank_pixel_data_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -104,6 +116,8 @@ struct hwseq_private_funcs {
bool enable);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_blank)(struct dc *dc, struct timing_generator *tg);
void (*disable_vga)(struct dce_hwseq *hws);
@@ -111,6 +125,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_power_down)(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+ void (*plane_atomic_power_down_sequence)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
@@ -139,15 +157,31 @@ struct hwseq_private_funcs {
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*update_odm_sequence)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context);
+ void (*program_all_writeback_pipes_in_tree_sequence)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*s0i3_golden_init_wa)(struct dc *dc);
void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*verify_allow_pstate_change_high_sequence)(struct dc *dc,
+ struct block_sequence_state *seq_state);
void (*program_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*program_pipe_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
void (*dccg_init)(struct dce_hwseq *hws);
bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
@@ -162,6 +196,8 @@ struct hwseq_private_funcs {
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
+ void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
@@ -183,6 +219,9 @@ struct hwseq_private_funcs {
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*dc_ip_request_cntl)(struct dc *dc, bool enable);
};
struct dce_hwseq {
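
The new *_sequence members mirror their immediate counterparts but record work into the caller's block_sequence_state instead of touching registers directly. A hedged sketch of a call site that prefers the sequence variant when a generation implements it is shown below; it assumes the usual dc->hwseq->funcs layout and is not code added by this patch.

/* Sketch: prefer the *_sequence hook when implemented, else program immediately. */
static void example_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx,
				struct block_sequence_state *seq_state)
{
	if (dc->hwseq->funcs.update_mpcc_sequence)
		dc->hwseq->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
	else
		dc->hwseq->funcs.update_mpcc(dc, pipe_ctx);
}
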
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index b5afd8c3103d..82085d9c3f40 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -26,6 +26,8 @@
#ifndef _CORE_STATUS_H_
#define _CORE_STATUS_H_
+#include "dc_hw_types.h"
+
enum dc_status {
DC_OK = 1,
@@ -56,6 +58,8 @@ enum dc_status {
DC_NO_LINK_ENC_RESOURCE = 26,
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
+ DC_FAIL_HW_CURSOR_SUPPORT = 29,
+ DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 06a420154888..5ed2cd344804 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,7 +39,7 @@
#include "panel_cntl.h"
#include "dmub/inc/dmub_cmd.h"
#include "pg_cntl.h"
-#include "spl/dc_spl.h"
+#include "sspl/dc_spl.h"
#define MAX_CLOCK_SOURCES 7
#define MAX_SVP_PHANTOM_STREAMS 2
@@ -58,13 +58,16 @@
#include "transform.h"
#include "dpp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
+struct dc_mcache_params;
+
+#define MAX_RMCM_INST 2
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -78,11 +81,10 @@ struct resource_funcs {
/* Create a minimal link encoder object with no dc_link object
* associated with it. */
struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id);
-
- bool (*validate_bandwidth)(
+ enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -107,7 +109,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* Algorithm for assigning available link encoders to links.
@@ -217,6 +219,17 @@ struct resource_funcs {
*/
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
+ unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ bool (*program_mcache_pipe_config)(struct dc_state *context,
+ const struct dc_mcache_params *mcache_params);
+ enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
};
struct audio_support{
@@ -261,7 +274,7 @@ struct resource_pool {
/* An array for accessing the link encoder objects that have been created.
* Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA
*/
- struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS];
+ struct link_encoder *link_encoders[MAX_LINK_ENCODERS];
/* Number of DIG link encoder objects created - i.e. number of valid
* entries in link_encoders array.
*/
@@ -275,6 +288,7 @@ struct resource_pool {
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+ struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
struct {
unsigned int xtalin_clock_inKhz;
@@ -375,12 +389,15 @@ struct plane_resource {
/* all mappable hardware resources used to enable a link */
struct link_resource {
+ struct link_encoder *dio_link_enc;
struct hpo_dp_link_encoder *hpo_dp_link_enc;
};
struct link_config {
struct dc_link_settings dp_link_settings;
+ struct dc_tunnel_settings dp_tunnel_settings;
};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -416,7 +433,14 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
+};
+
+struct dsc_padding_params {
+ /* pixels borrowed from hblank to hactive */
+ uint8_t dsc_hactive_padding;
+ uint32_t dsc_htotal_padding;
+ uint32_t dsc_pix_clk_100hz;
};
struct pipe_ctx {
@@ -465,6 +489,7 @@ struct pipe_ctx {
unsigned int surface_size_in_mall_bytes;
struct dml2_dchub_per_pipe_register_set hubp_regs;
struct dml2_hubp_pipe_mcache_regs mcache_regs;
+ union dml2_global_sync_programming global_sync;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -475,8 +500,11 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
- /* pixels borrowed from hblank to hactive */
- uint8_t hblank_borrow;
+ struct dsc_padding_params dsc_padding_params;
+ /* next vupdate */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
@@ -486,7 +514,7 @@ struct pipe_ctx {
struct link_enc_cfg_context {
enum link_enc_cfg_mode mode;
struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ enum engine_id link_enc_avail[MAX_LINK_ENCODERS];
struct link_enc_assignment transient_assignments[MAX_PIPES];
};
@@ -498,11 +526,13 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
+ unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
- /* solely used for build scalar data in dml2 */
+ /* used to build scalar data in dml2 and for edp backlight programming */
struct pipe_ctx temp_pipe;
};
@@ -540,7 +570,10 @@ struct dcn_bw_output {
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
- union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union {
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES];
+ };
struct dml2_display_arb_regs arb_regs;
};
@@ -625,7 +658,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[50];
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
@@ -656,6 +689,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
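
With the resource_funcs change above, validate_bandwidth() now returns an enum dc_status and takes an enum dc_validate_mode rather than the old bool fast_validate flag. A hedged sketch of an adapted caller follows; it relies only on the DC_OK value from core_status.h and the usual dc->res_pool->funcs indirection, and is not part of this patch.

/* Sketch: a caller adapted to the new validate_bandwidth() signature. */
static bool example_revalidate(struct dc *dc, struct dc_state *context,
			       enum dc_validate_mode mode)
{
	enum dc_status status;

	status = dc->res_pool->funcs->validate_bandwidth(dc, context, mode);

	return status == DC_OK;
}
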
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index d19a595c2be4..134091d5842d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn_get_soc_clks(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 2d06067ff36d..2c9a4a12bd8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -100,6 +100,17 @@ struct dcn301_clk_internal {
#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
+enum clk_type {
+ CLK_TYPE_DCFCLK,
+ CLK_TYPE_FCLK,
+ CLK_TYPE_MCLK,
+ CLK_TYPE_SOCCLK,
+ CLK_TYPE_DTBCLK,
+ CLK_TYPE_DISPCLK,
+ CLK_TYPE_DPPCLK,
+ CLK_TYPE_DSCCLK,
+ CLK_TYPE_COUNT
+};
struct clk_limit_table_entry {
 unsigned int voltage; /* millivolts with 2 fractional bits */
@@ -306,6 +317,9 @@ struct clk_mgr_funcs {
*/
void (*set_hard_min_memclk)(struct clk_mgr *clk_mgr, bool current_mode);
+ int (*get_hard_min_memclk)(struct clk_mgr *clk_mgr);
+ int (*get_hard_min_fclk)(struct clk_mgr *clk_mgr);
+
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
@@ -321,6 +335,11 @@ struct clk_mgr_funcs {
int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+ bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
+
+ uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
+
+ unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
};
struct clk_mgr {
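
The new get_max_clock_khz() hook is keyed by the enum clk_type introduced above. A small, hedged usage sketch follows; it assumes the usual clk_mgr->funcs pointer and guards on the hook being implemented, and is purely illustrative.

/* Sketch: query the maximum DISPCLK the clock manager reports. */
static unsigned int example_max_dispclk_khz(struct clk_mgr *clk_mgr)
{
	if (!clk_mgr->funcs->get_max_clock_khz)
		return 0;

	return clk_mgr->funcs->get_max_clock_khz(clk_mgr, CLK_TYPE_DISPCLK);
}
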
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7a1ca1e98059..bac8febad69a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -199,6 +199,7 @@ enum dentist_divider_range {
CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
CLK_SR_DCN35(CLK5_spll_field_8), \
+ CLK_SR_DCN35(CLK6_spll_field_8), \
SR(DENTIST_DISPCLK_CNTL), \
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
@@ -221,6 +222,7 @@ enum dentist_divider_range {
CLK_SF(CLK0_CLK_PLL_REQ, FbMult_frac, mask_sh)
#define CLK_REG_LIST_DCN401() \
+ SR(DENTIST_DISPCLK_CNTL), \
CLK_SR_DCN401(CLK0_CLK_PLL_REQ, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK0_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \
@@ -306,7 +308,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK4_ALLOW_DS;
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
-
+ uint32_t CLK6_spll_field_8;
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 45645f9fd86c..7ce2f417f86a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp {
} size;
union reg_cursor_settings_cfg {
struct {
- uint32_t dst_y_offset: 8;
- uint32_t chunk_hdl_adjust: 2;
- uint32_t reserved: 22;
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
} bits;
uint32_t raw;
} settings;
@@ -83,12 +83,34 @@ union reg_cur0_control_cfg {
} bits;
uint32_t raw;
};
+
struct cursor_position_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
};
struct cursor_attribute_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
+ union reg_cur0_fp_scale_bias {
+ struct {
+ uint32_t fp_bias: 16;
+ uint32_t fp_scale: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias;
+ union reg_cur0_fp_scale_bias_g_y {
+ struct {
+ uint32_t fp_bias_g_y: 16;
+ uint32_t fp_scale_g_y: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_g_y;
+ union reg_cur0_fp_scale_bias_rb_crcb {
+ struct {
+ uint32_t fp_bias_rb_crcb: 16;
+ uint32_t fp_scale_rb_crcb: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_rb_crcb;
};
struct cursor_attributes_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index e94e9ba60f55..500a601e99b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -71,6 +71,125 @@ enum pixel_rate_div {
PIXEL_RATE_DIV_NA = 0xF
};
+struct dcn_dccg_reg_state {
+ uint32_t dc_mem_global_pwr_req_cntl;
+ uint32_t dccg_audio_dtbclk_dto_modulo;
+ uint32_t dccg_audio_dtbclk_dto_phase;
+ uint32_t dccg_audio_dto_source;
+ uint32_t dccg_audio_dto0_module;
+ uint32_t dccg_audio_dto0_phase;
+ uint32_t dccg_audio_dto1_module;
+ uint32_t dccg_audio_dto1_phase;
+ uint32_t dccg_cac_status;
+ uint32_t dccg_cac_status2;
+ uint32_t dccg_disp_cntl_reg;
+ uint32_t dccg_ds_cntl;
+ uint32_t dccg_ds_dto_incr;
+ uint32_t dccg_ds_dto_modulo;
+ uint32_t dccg_ds_hw_cal_interval;
+ uint32_t dccg_gate_disable_cntl;
+ uint32_t dccg_gate_disable_cntl2;
+ uint32_t dccg_gate_disable_cntl3;
+ uint32_t dccg_gate_disable_cntl4;
+ uint32_t dccg_gate_disable_cntl5;
+ uint32_t dccg_gate_disable_cntl6;
+ uint32_t dccg_global_fgcg_rep_cntl;
+ uint32_t dccg_gtc_cntl;
+ uint32_t dccg_gtc_current;
+ uint32_t dccg_gtc_dto_incr;
+ uint32_t dccg_gtc_dto_modulo;
+ uint32_t dccg_perfmon_cntl;
+ uint32_t dccg_perfmon_cntl2;
+ uint32_t dccg_soft_reset;
+ uint32_t dccg_test_clk_sel;
+ uint32_t dccg_vsync_cnt_ctrl;
+ uint32_t dccg_vsync_cnt_int_ctrl;
+ uint32_t dccg_vsync_otg0_latch_value;
+ uint32_t dccg_vsync_otg1_latch_value;
+ uint32_t dccg_vsync_otg2_latch_value;
+ uint32_t dccg_vsync_otg3_latch_value;
+ uint32_t dccg_vsync_otg4_latch_value;
+ uint32_t dccg_vsync_otg5_latch_value;
+ uint32_t dispclk_cgtt_blk_ctrl_reg;
+ uint32_t dispclk_freq_change_cntl;
+ uint32_t dp_dto_dbuf_en;
+ uint32_t dp_dto0_modulo;
+ uint32_t dp_dto0_phase;
+ uint32_t dp_dto1_modulo;
+ uint32_t dp_dto1_phase;
+ uint32_t dp_dto2_modulo;
+ uint32_t dp_dto2_phase;
+ uint32_t dp_dto3_modulo;
+ uint32_t dp_dto3_phase;
+ uint32_t dpiaclk_540m_dto_modulo;
+ uint32_t dpiaclk_540m_dto_phase;
+ uint32_t dpiaclk_810m_dto_modulo;
+ uint32_t dpiaclk_810m_dto_phase;
+ uint32_t dpiaclk_dto_cntl;
+ uint32_t dpiasymclk_cntl;
+ uint32_t dppclk_cgtt_blk_ctrl_reg;
+ uint32_t dppclk_ctrl;
+ uint32_t dppclk_dto_ctrl;
+ uint32_t dppclk0_dto_param;
+ uint32_t dppclk1_dto_param;
+ uint32_t dppclk2_dto_param;
+ uint32_t dppclk3_dto_param;
+ uint32_t dprefclk_cgtt_blk_ctrl_reg;
+ uint32_t dprefclk_cntl;
+ uint32_t dpstreamclk_cntl;
+ uint32_t dscclk_dto_ctrl;
+ uint32_t dscclk0_dto_param;
+ uint32_t dscclk1_dto_param;
+ uint32_t dscclk2_dto_param;
+ uint32_t dscclk3_dto_param;
+ uint32_t dtbclk_dto_dbuf_en;
+ uint32_t dtbclk_dto0_modulo;
+ uint32_t dtbclk_dto0_phase;
+ uint32_t dtbclk_dto1_modulo;
+ uint32_t dtbclk_dto1_phase;
+ uint32_t dtbclk_dto2_modulo;
+ uint32_t dtbclk_dto2_phase;
+ uint32_t dtbclk_dto3_modulo;
+ uint32_t dtbclk_dto3_phase;
+ uint32_t dtbclk_p_cntl;
+ uint32_t force_symclk_disable;
+ uint32_t hdmicharclk0_clock_cntl;
+ uint32_t hdmistreamclk_cntl;
+ uint32_t hdmistreamclk0_dto_param;
+ uint32_t microsecond_time_base_div;
+ uint32_t millisecond_time_base_div;
+ uint32_t otg_pixel_rate_div;
+ uint32_t otg0_phypll_pixel_rate_cntl;
+ uint32_t otg0_pixel_rate_cntl;
+ uint32_t otg1_phypll_pixel_rate_cntl;
+ uint32_t otg1_pixel_rate_cntl;
+ uint32_t otg2_phypll_pixel_rate_cntl;
+ uint32_t otg2_pixel_rate_cntl;
+ uint32_t otg3_phypll_pixel_rate_cntl;
+ uint32_t otg3_pixel_rate_cntl;
+ uint32_t phyasymclk_clock_cntl;
+ uint32_t phybsymclk_clock_cntl;
+ uint32_t phycsymclk_clock_cntl;
+ uint32_t phydsymclk_clock_cntl;
+ uint32_t phyesymclk_clock_cntl;
+ uint32_t phyplla_pixclk_resync_cntl;
+ uint32_t phypllb_pixclk_resync_cntl;
+ uint32_t phypllc_pixclk_resync_cntl;
+ uint32_t phyplld_pixclk_resync_cntl;
+ uint32_t phyplle_pixclk_resync_cntl;
+ uint32_t refclk_cgtt_blk_ctrl_reg;
+ uint32_t socclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_psp_cntl;
+ uint32_t symclk32_le_cntl;
+ uint32_t symclk32_se_cntl;
+ uint32_t symclka_clock_enable;
+ uint32_t symclkb_clock_enable;
+ uint32_t symclkc_clock_enable;
+ uint32_t symclkd_clock_enable;
+ uint32_t symclke_clock_enable;
+};
+
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
@@ -81,7 +200,6 @@ struct dccg {
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
};
-
struct dtbclk_dto_params {
const struct dc_crtc_timing *timing;
int otg_inst;
@@ -211,9 +329,10 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
+ void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
};
#endif //__DAL_DCCG_H__
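
The new dccg_read_reg_state() hook pairs with the dcn_dccg_reg_state snapshot above. A hedged sketch of a debug dump is shown below; it reads only fields declared in this hunk, guards on the hook being implemented, and the DC_LOG_DEBUG call is illustrative rather than mandated by this patch.

/* Sketch: snapshot and log a couple of DCCG registers for debugging. */
static void example_dump_dccg(struct dccg *dccg)
{
	struct dcn_dccg_reg_state reg_state = {0};

	if (!dccg->funcs->dccg_read_reg_state)
		return;

	dccg->funcs->dccg_read_reg_state(dccg, &reg_state);

	DC_LOG_DEBUG("DCCG: otg_pixel_rate_div=0x%08x soft_reset=0x%08x\n",
		     reg_state.otg_pixel_rate_div,
		     reg_state.dccg_soft_reset);
}
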
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..1ddfa30411c8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,27 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct dcn_hubbub_reg_state {
+ uint32_t det0_ctrl;
+ uint32_t det1_ctrl;
+ uint32_t det2_ctrl;
+ uint32_t det3_ctrl;
+ uint32_t compbuf_ctrl;
+};
+
+struct hubbub_system_latencies {
+ uint32_t max_latency_ns;
+ uint32_t avg_latency_ns;
+ uint32_t min_latency_ns;
+};
+
+struct hubbub_urgent_latency_params {
+ uint32_t refclk_mhz;
+ uint32_t t_win_ns;
+ uint32_t bandwidth_mbps;
+ uint32_t bw_factor_x1000;
+};
+
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
@@ -203,6 +224,8 @@ struct hubbub_funcs {
void (*init_watermarks)(struct hubbub *hubbub);
+ void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
/**
* @program_det_size:
*
@@ -229,6 +252,39 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*dchvm_init)(struct hubbub *hubbub);
+
+ struct hubbub_perfmon_funcs {
+ void (*reset)(struct hubbub *hubbub);
+ void (*start_measuring_max_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_average_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_urgent_ramp_latency_ns)(
+ struct hubbub *hubbub,
+ const struct hubbub_urgent_latency_params *params);
+ uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz);
+ void (*start_measuring_unbounded_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *duration_ns);
+ void (*start_measuring_average_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t min_duration_ns,
+ uint32_t *duration_ns);
+ } perfmon;
+
+ struct hubbub_qos_funcs {
+ void (*force_display_nominal_profile)(struct hubbub *hubbub);
+ void (*force_display_urgent_profile)(struct hubbub *hubbub);
+ void (*reset_display_qos_profile)(struct hubbub *hubbub);
+ } qos;
};
struct hubbub {
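
The embedded perfmon ops above follow a reset/start/read cycle. The sketch below strings one max-latency measurement together; it assumes the usual hubbub->funcs hook table and that a workload of interest runs between start and read, and is not code added by this patch.

/* Sketch: one perfmon cycle measuring worst-case memory latency. */
static uint32_t example_measure_max_latency(struct hubbub *hubbub, uint32_t refclk_mhz)
{
	uint32_t sample_count = 0;

	hubbub->funcs->perfmon.reset(hubbub);
	hubbub->funcs->perfmon.start_measuring_max_memory_latency_ns(hubbub);

	/* ...let the workload of interest run here... */

	return hubbub->funcs->perfmon.get_max_memory_latency_ns(hubbub,
			refclk_mhz, &sample_count);
}
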
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0150f2581ee4..d88b57d4f512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -65,7 +65,6 @@ union defer_reg_writes {
} bits;
uint32_t raw;
};
-
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -84,6 +83,7 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+ bool cursor_offload;
struct cursor_position_cache_dpp pos;
struct cursor_attribute_cache_dpp att;
@@ -119,10 +119,14 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] =
{ 0x39a6, 0x2568, 0, 0xe0d6,
0xeedd, 0x2568, 0xf925, 0x9a8,
0, 0x2568, 0x43ee, 0xdbb2 } },
- { COLOR_SPACE_2020_YCBCR,
+ { COLOR_SPACE_2020_YCBCR_FULL,
{ 0x2F30, 0x2000, 0, 0xE869,
0xEDB7, 0x2000, 0xFABC, 0xBC6,
0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_YCBCR_LIMITED,
+ { 0x35B9, 0x2543, 0, 0xE2B2,
+ 0xEB2F, 0x2543, 0xFA01, 0x0B1F,
+ 0, 0x2543, 0x4489, 0xDB42 } },
{ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
{ 0x35E0, 0x255F, 0, 0xE2B3,
0xEB20, 0x255F, 0xF9FD, 0xB1E,
@@ -198,6 +202,19 @@ struct dcn_dpp_state {
uint32_t gamcor_mode;
};
+struct dcn_dpp_reg_state {
+ uint32_t recout_start;
+ uint32_t recout_size;
+ uint32_t scl_horz_filter_scale_ratio;
+ uint32_t scl_vert_filter_scale_ratio;
+ uint32_t scl_mode;
+ uint32_t cm_control;
+ uint32_t dpp_control;
+ uint32_t dscl_control;
+ uint32_t obuf_control;
+ uint32_t mpc_size;
+};
+
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
@@ -221,6 +238,8 @@ struct dpp_funcs {
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+ void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
@@ -345,6 +364,9 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
+
+ void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index d0878fc0cc94..a79019365af8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -41,7 +41,8 @@
#include "mem_input.h"
#include "cursor_reg_cache.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -88,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -98,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -109,11 +126,13 @@ struct hubp {
int mpcc_id;
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
+ bool cursor_offload;
bool power_gated;
struct cursor_position_cache_hubp pos;
struct cursor_attribute_cache_hubp att;
struct cursor_rect cur_rect;
+ bool use_mall_for_cursor;
};
struct surface_flip_registers {
@@ -144,14 +163,26 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
void (*hubp_setup_interdependent)(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+ void (*hubp_setup_interdependent2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
+ void (*hubp_reset)(struct hubp *hubp);
+
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
@@ -165,7 +196,7 @@ struct hubp_funcs {
void (*hubp_program_pte_vm)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*hubp_set_vm_system_aperture_settings)(
@@ -179,7 +210,7 @@ struct hubp_funcs {
void (*hubp_program_surface_config)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -207,6 +238,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
@@ -269,13 +301,16 @@ struct hubp_funcs {
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
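
hubp_program_3dlut_fl_config() bundles the per-field 3D LUT fast-load programming into the hubp_fl_3dlut_config struct declared above. The hedged sketch below fills only fields whose enumerators are visible in this header and leaves the rest zero-initialized; the scale value is a placeholder and real callers would also set mode, format, layout and address.

/* Sketch: only enumerators visible in this header are used; other fields stay zero. */
static void example_program_3dlut_fl(struct hubp *hubp)
{
	struct hubp_fl_3dlut_config cfg = {0};

	cfg.enabled = true;
	cfg.width = hubp_3dlut_fl_width_17;
	cfg.bias = 0;
	cfg.scale = 0x1000; /* placeholder fixed-point scale */
	cfg.crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_48_63;

	if (hubp->funcs->hubp_program_3dlut_fl_config)
		hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cfg);
}
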
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..a61d12ec61bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,12 +44,67 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
+/**
+ * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders
+ *
+ * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7,
+ * although not every GPU may have that many.
+ */
#define MAX_DIG_LINK_ENCODERS 7
+
+/**
+ * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
+ *
+ * Analog encoders are ENGINE_ID_DACA/B; there are at most 2,
+ * although not every GPU may have that many. Modern GPUs typically
+ * don't have analog encoders.
+ */
+#define MAX_DAC_LINK_ENCODERS 2
+
+/**
+ * define MAX_LINK_ENCODERS - maximum number of link encoders in total
+ *
+ * This includes both analog and digital encoders.
+ */
+#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
+
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
#define MAX_HPO_DP2_LINK_ENCODERS 4
+/* Pipe topology snapshot structures */
+#define MAX_TOPOLOGY_SNAPSHOTS 4
+
+struct pipe_topology_line {
+ bool is_phantom_pipe;
+ int plane_idx;
+ int slice_idx;
+ int stream_idx;
+ int dpp_inst;
+ int opp_inst;
+ int tg_inst;
+};
+
+struct pipe_topology_snapshot {
+ struct pipe_topology_line pipe_log_lines[MAX_PIPES];
+ int line_count;
+ uint64_t timestamp_us;
+ int stream_count;
+ int phantom_stream_count;
+};
+
+struct pipe_topology_history {
+ struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
+ int current_snapshot_index;
+};
+
struct gamma_curve {
uint32_t offset;
uint32_t segments_num;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index af9183f5d69b..df512920a9fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -47,6 +47,7 @@ struct encoder_init_data {
enum hpd_source_id hpd_source;
/* TODO: in DAL2, here was pointer to EventManagerInterface */
struct graphics_object_id encoder;
+ enum engine_id analog_engine;
struct dc_context *ctx;
enum transmitter transmitter;
};
@@ -83,6 +84,7 @@ struct link_encoder {
struct graphics_object_id connector;
uint32_t output_signals;
enum engine_id preferred_engine;
+ enum engine_id analog_engine;
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
@@ -168,6 +170,14 @@ struct link_encoder_funcs {
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
+ void (*enable_dpia_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+ void (*disable_dpia_output)(struct link_encoder *link_enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 4f5d102455ca..d468bc85566a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -29,7 +29,7 @@
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
@@ -150,7 +150,7 @@ struct mem_input_funcs {
void (*mem_input_program_pte_vm)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*mem_input_set_vm_system_aperture_settings)(
@@ -164,7 +164,7 @@ struct mem_input_funcs {
void (*mem_input_program_surface_config)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 3a89cc0cffc1..a8d1abe20f62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -190,6 +200,42 @@ struct mpc_grph_gamut_adjustment {
enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
+struct mpc_rmcm_regs {
+ uint32_t rmcm_3dlut_mem_pwr_state;
+ uint32_t rmcm_3dlut_mem_pwr_force;
+ uint32_t rmcm_3dlut_mem_pwr_dis;
+ uint32_t rmcm_3dlut_mem_pwr_mode;
+ uint32_t rmcm_3dlut_size;
+ uint32_t rmcm_3dlut_mode;
+ uint32_t rmcm_3dlut_mode_cur;
+ uint32_t rmcm_3dlut_read_sel;
+ uint32_t rmcm_3dlut_30bit_en;
+ uint32_t rmcm_3dlut_wr_en_mask;
+ uint32_t rmcm_3dlut_ram_sel;
+ uint32_t rmcm_3dlut_out_norm_factor;
+ uint32_t rmcm_3dlut_fl_sel;
+ uint32_t rmcm_3dlut_out_offset_r;
+ uint32_t rmcm_3dlut_out_scale_r;
+ uint32_t rmcm_3dlut_fl_done;
+ uint32_t rmcm_3dlut_fl_soft_underflow;
+ uint32_t rmcm_3dlut_fl_hard_underflow;
+ uint32_t rmcm_cntl;
+ uint32_t rmcm_shaper_mem_pwr_state;
+ uint32_t rmcm_shaper_mem_pwr_force;
+ uint32_t rmcm_shaper_mem_pwr_dis;
+ uint32_t rmcm_shaper_mem_pwr_mode;
+ uint32_t rmcm_shaper_lut_mode;
+ uint32_t rmcm_shaper_mode_cur;
+ uint32_t rmcm_shaper_lut_write_en_mask;
+ uint32_t rmcm_shaper_lut_write_sel;
+ uint32_t rmcm_shaper_offset_b;
+ uint32_t rmcm_shaper_scale_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_seg_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_base_b;
+};
+
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */
@@ -301,6 +347,16 @@ struct mpcc_state {
uint32_t rgam_mode;
uint32_t rgam_lut;
struct mpc_grph_gamut_adjustment gamut_remap;
+ struct mpc_rmcm_regs rmcm_regs;
+};
+
+struct dcn_mpc_reg_state {
+ uint32_t mpcc_bot_sel;
+ uint32_t mpcc_control;
+ uint32_t mpcc_status;
+ uint32_t mpcc_top_sel;
+ uint32_t mpcc_opp_id;
+ uint32_t mpcc_ogam_control;
};
/**
@@ -326,6 +382,24 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
+ /**
+ * @mpc_read_reg_state:
+ *
+ * Read the MPC register state for underflow debugging.
+ *
+ * Parameters:
+ *
+ * - [in] mpc - MPC context
+ * - [in] mpcc_inst - MPCC instance to read
+ * - [out] mpc_reg_state - MPC register state structure
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*mpc_read_reg_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
/**
* @insert_plane:
@@ -967,23 +1041,6 @@ struct mpc_funcs {
*/
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
- /**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
- void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
* @populate_lut:
@@ -1039,21 +1096,46 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
+
/**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+ * @mcm:
+ *
+ * New MPC MCM HW sequential programming functions
+ */
+ struct {
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } mcm;
+
+ /**
+ * @rmcm:
+ *
+ * New MPC RMCM HW sequential programming functions
+ */
+ struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
+ void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+
+ void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on);
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } rmcm;
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 747679cb4944..e1428a83ecbc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,16 @@ struct oppbuf_params {
uint32_t num_segment_padded_pixels;
};
+struct dcn_opp_reg_state {
+ uint32_t dpg_control;
+ uint32_t fmt_control;
+ uint32_t oppbuf_control;
+ uint32_t opp_pipe_control;
+ uint32_t opp_pipe_crc_control;
+ uint32_t opp_abm_control;
+ uint32_t dscrm_dsc_forward_config;
+};
+
struct opp_funcs {
@@ -368,6 +378,9 @@ struct opp_funcs {
struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding,
bool is_primary);
+
+ void (*opp_read_reg_state)(
+ struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 03cbcbb36f1c..0d5a8358a778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -68,37 +68,10 @@ struct optc {
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
-struct dcn_otg_state {
- uint32_t v_blank_start;
- uint32_t v_blank_end;
- uint32_t v_sync_a_pol;
- uint32_t v_total;
- uint32_t v_total_max;
- uint32_t v_total_min;
- uint32_t v_total_min_sel;
- uint32_t v_total_max_sel;
- uint32_t v_sync_a_start;
- uint32_t v_sync_a_end;
- uint32_t h_blank_start;
- uint32_t h_blank_end;
- uint32_t h_sync_a_start;
- uint32_t h_sync_a_end;
- uint32_t h_sync_a_pol;
- uint32_t h_total;
- uint32_t underflow_occurred_status;
- uint32_t otg_enabled;
- uint32_t blank_enabled;
- uint32_t vertical_interrupt1_en;
- uint32_t vertical_interrupt1_line;
- uint32_t vertical_interrupt2_en;
- uint32_t vertical_interrupt2_line;
- uint32_t otg_master_update_lock;
- uint32_t otg_double_buffer_control;
-};
-
-void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
+void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
bool optc1_get_hw_timing(struct timing_generator *tg, struct dc_crtc_timing *hw_crtc_timing);
@@ -210,7 +183,7 @@ void optc1_enable_crtc_reset(struct timing_generator *optc,
bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params);
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 00ea3864dd4d..227e3f8d7e5f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -46,7 +46,10 @@ struct pg_cntl_funcs {
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
+ void (*print_pg_status)(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log);
};
#endif //__DC_PG_CNTL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe7f3137f228..27f950ae45ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -117,6 +117,7 @@ struct stream_encoder {
uint32_t stream_enc_inst;
struct vpg *vpg;
struct afmt *afmt;
+ struct apg *apg;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index b74e18cc1e66..da7bf59c4b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -141,6 +141,167 @@ struct crc_params {
bool continuous_mode;
bool enable;
+
+ uint8_t crc_eng_inst;
+ bool reset;
+};
+
+struct dcn_otg_state {
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t v_sync_a_pol;
+ uint32_t v_total;
+ uint32_t v_total_max;
+ uint32_t v_total_min;
+ uint32_t v_total_min_sel;
+ uint32_t v_total_max_sel;
+ uint32_t v_sync_a_start;
+ uint32_t v_sync_a_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ uint32_t h_sync_a_start;
+ uint32_t h_sync_a_end;
+ uint32_t h_sync_a_pol;
+ uint32_t h_total;
+ uint32_t underflow_occurred_status;
+ uint32_t otg_enabled;
+ uint32_t blank_enabled;
+ uint32_t vertical_interrupt1_en;
+ uint32_t vertical_interrupt1_line;
+ uint32_t vertical_interrupt2_en;
+ uint32_t vertical_interrupt2_line;
+ uint32_t vertical_interrupt2_dest;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_double_buffer_control;
+};
+
+struct dcn_optc_reg_state {
+ uint32_t optc_bytes_per_pixel;
+ uint32_t optc_data_format_control;
+ uint32_t optc_data_source_select;
+ uint32_t optc_input_clock_control;
+ uint32_t optc_input_global_control;
+ uint32_t optc_input_spare_register;
+ uint32_t optc_memory_config;
+ uint32_t optc_rsmu_underflow;
+ uint32_t optc_underflow_threshold;
+ uint32_t optc_width_control;
+
+ uint32_t otg_3d_structure_control;
+ uint32_t otg_clock_control;
+ uint32_t otg_control;
+ uint32_t otg_count_control;
+ uint32_t otg_count_reset;
+ uint32_t otg_crc_cntl;
+ uint32_t otg_crc_sig_blue_control_mask;
+ uint32_t otg_crc_sig_red_green_mask;
+ uint32_t otg_crc0_data_b;
+ uint32_t otg_crc0_data_rg;
+ uint32_t otg_crc0_windowa_x_control;
+ uint32_t otg_crc0_windowa_x_control_readback;
+ uint32_t otg_crc0_windowa_y_control;
+ uint32_t otg_crc0_windowa_y_control_readback;
+ uint32_t otg_crc0_windowb_x_control;
+ uint32_t otg_crc0_windowb_x_control_readback;
+ uint32_t otg_crc0_windowb_y_control;
+ uint32_t otg_crc0_windowb_y_control_readback;
+ uint32_t otg_crc1_data_b;
+ uint32_t otg_crc1_data_rg;
+ uint32_t otg_crc1_windowa_x_control;
+ uint32_t otg_crc1_windowa_x_control_readback;
+ uint32_t otg_crc1_windowa_y_control;
+ uint32_t otg_crc1_windowa_y_control_readback;
+ uint32_t otg_crc1_windowb_x_control;
+ uint32_t otg_crc1_windowb_x_control_readback;
+ uint32_t otg_crc1_windowb_y_control;
+ uint32_t otg_crc1_windowb_y_control_readback;
+ uint32_t otg_crc2_data_b;
+ uint32_t otg_crc2_data_rg;
+ uint32_t otg_crc3_data_b;
+ uint32_t otg_crc3_data_rg;
+ uint32_t otg_dlpc_control;
+ uint32_t otg_double_buffer_control;
+ uint32_t otg_drr_control2;
+ uint32_t otg_drr_control;
+ uint32_t otg_drr_timing_int_status;
+ uint32_t otg_drr_trigger_window;
+ uint32_t otg_drr_v_total_change;
+ uint32_t otg_drr_v_total_reach_range;
+ uint32_t otg_dsc_start_position;
+ uint32_t otg_force_count_now_cntl;
+ uint32_t otg_global_control0;
+ uint32_t otg_global_control1;
+ uint32_t otg_global_control2;
+ uint32_t otg_global_control3;
+ uint32_t otg_global_control4;
+ uint32_t otg_global_sync_status;
+ uint32_t otg_gsl_control;
+ uint32_t otg_gsl_vsync_gap;
+ uint32_t otg_gsl_window_x;
+ uint32_t otg_gsl_window_y;
+ uint32_t otg_h_blank_start_end;
+ uint32_t otg_h_sync_a;
+ uint32_t otg_h_sync_a_cntl;
+ uint32_t otg_h_timing_cntl;
+ uint32_t otg_h_total;
+ uint32_t otg_interlace_control;
+ uint32_t otg_interlace_status;
+ uint32_t otg_interrupt_control;
+ uint32_t otg_long_vblank_status;
+ uint32_t otg_m_const_dto0;
+ uint32_t otg_m_const_dto1;
+ uint32_t otg_manual_force_vsync_next_line;
+ uint32_t otg_master_en;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_master_update_mode;
+ uint32_t otg_nom_vert_position;
+ uint32_t otg_pipe_update_status;
+ uint32_t otg_pixel_data_readback0;
+ uint32_t otg_pixel_data_readback1;
+ uint32_t otg_request_control;
+ uint32_t otg_snapshot_control;
+ uint32_t otg_snapshot_frame;
+ uint32_t otg_snapshot_position;
+ uint32_t otg_snapshot_status;
+ uint32_t otg_spare_register;
+ uint32_t otg_static_screen_control;
+ uint32_t otg_status;
+ uint32_t otg_status_frame_count;
+ uint32_t otg_status_hv_count;
+ uint32_t otg_status_position;
+ uint32_t otg_status_vf_count;
+ uint32_t otg_stereo_control;
+ uint32_t otg_stereo_force_next_eye;
+ uint32_t otg_stereo_status;
+ uint32_t otg_trig_manual_control;
+ uint32_t otg_triga_cntl;
+ uint32_t otg_triga_manual_trig;
+ uint32_t otg_trigb_cntl;
+ uint32_t otg_trigb_manual_trig;
+ uint32_t otg_update_lock;
+ uint32_t otg_v_blank_start_end;
+ uint32_t otg_v_count_stop_control;
+ uint32_t otg_v_count_stop_control2;
+ uint32_t otg_v_sync_a;
+ uint32_t otg_v_sync_a_cntl;
+ uint32_t otg_v_total;
+ uint32_t otg_v_total_control;
+ uint32_t otg_v_total_int_status;
+ uint32_t otg_v_total_max;
+ uint32_t otg_v_total_mid;
+ uint32_t otg_v_total_min;
+ uint32_t otg_vert_sync_control;
+ uint32_t otg_vertical_interrupt0_control;
+ uint32_t otg_vertical_interrupt0_position;
+ uint32_t otg_vertical_interrupt1_control;
+ uint32_t otg_vertical_interrupt1_position;
+ uint32_t otg_vertical_interrupt2_control;
+ uint32_t otg_vertical_interrupt2_position;
+ uint32_t otg_vready_param;
+ uint32_t otg_vstartup_param;
+ uint32_t otg_vsync_nom_int_status;
+ uint32_t otg_vupdate_keepout;
+ uint32_t otg_vupdate_param;
};
/**
@@ -291,7 +452,7 @@ struct timing_generator_funcs {
* @get_crc: Get CRCs for the given timing generator. Return false if
* CRCs are not enabled (via configure_crc).
*/
- bool (*get_crc)(struct timing_generator *tg,
+ bool (*get_crc)(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
void (*program_manual_trigger)(struct timing_generator *optc);
@@ -342,11 +503,14 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
+ void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
+ void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 45262cba675e..5a1d9b708a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -29,7 +29,7 @@
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "fixed31_32.h"
-#include "spl/dc_spl_types.h"
+#include "sspl/dc_spl_types.h"
#define CSC_TEMPERATURE_MATRIX_SIZE 12
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index dc650be3837e..f1afb31ac70b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -96,11 +96,6 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
/* Return next available DIG link encoder. NULL if none available. */
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc);
-/* Return DIG link encoder used by stream. NULL if unused. */
-struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
- struct dc *dc,
- const struct dc_stream_state *stream);
-
/* Return DIG link encoder. NULL if unused. */
struct link_encoder *link_enc_cfg_get_link_enc(const struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index f04292086c08..6f94e48a24d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -42,8 +42,8 @@
* dc_link_exports.c or other dc files implement dc.h
*
* DC to Link:
- * dc_link_exports.c or other dc files include link.h
- * link_factory.c implements link.h
+ * dc_link_exports.c or other dc files include link_service.h
+ * link_factory.c implements link_service.h
*
* Link sub-component to Link sub-component:
* link_factory.c includes --> link_xxx.h
@@ -73,7 +73,7 @@
* 2. Implement your function in the suitable link_xxx.c file.
* 3. Assign the function to link_service in link_factory.c
* 4. NEVER include link_xxx.h headers outside link component.
- * 5. NEVER include link.h on DM side.
+ * 5. NEVER include link_service.h on DM side.
*/
#include "core_types.h"
@@ -144,9 +144,13 @@ struct link_service {
uint32_t (*dp_link_bandwidth_kbps)(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
- bool (*validate_dpia_bandwidth)(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+ enum dc_status (*validate_dp_tunnel_bandwidth)(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
+
+ uint32_t (*dp_required_hblank_size_bytes)(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
/*************************** DPMS *************************************/
@@ -203,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
+ void (*dp_decide_tunnel_settings)(
+ struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,
@@ -211,13 +218,14 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
- int (*dpia_handle_usb4_bandwidth_allocation_for_link)(
+ void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
struct dc_link *link, int peak_bw);
- void (*dpia_handle_bw_alloc_response)(
- struct dc_link *link, uint8_t bw, uint8_t result);
void (*dp_set_drive_settings)(
struct dc_link *link,
const struct link_resource *link_res,
@@ -284,12 +292,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint32_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index a402df225a76..26cb1459b743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
initial_val, \
n, __VA_ARGS__)
+#define IX_REG_SET_SYNC(index, init_value, f1, v1) \
+ IX_REG_SET_N_SYNC(index, 1, init_value, \
+ FN(reg, f1), v1)
+
#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \
IX_REG_SET_N_SYNC(index, 2, init_value, \
FN(reg, f1), v1,\
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index cd1157d225ab..79746d931471 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -29,10 +29,10 @@
#include "core_status.h"
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
-#include "spl/dc_spl.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
+#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -45,9 +45,11 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
int num_opp;
+ int num_dpp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
+ int num_analog_stream_encoder;
int num_pll;
int num_dwb;
int num_ddc;
@@ -66,6 +68,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
+struct dc_mcache_allocations {
+ int global_mcache_ids_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_plane1[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
+};
+
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);
@@ -152,6 +161,8 @@ bool resource_attach_surfaces_to_context(
struct dc_state *context,
const struct resource_pool *pool);
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx);
+
#define FREE_PIPE_INDEX_NOT_FOUND -1
/*
@@ -627,8 +638,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
/* Get hw programming parameters container from pipe context
* @pipe_ctx: pipe context
* @dscl_prog_data: struct to hold programmable hw reg values
@@ -646,4 +655,9 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
bool resource_is_hpo_acquired(struct dc_state *context);
+
+struct link_encoder *get_temp_dio_link_enc(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *const pool,
+ const struct dc_link *link);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 8ac36bdd4e1e..b5e14d792378 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -182,6 +182,15 @@ AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351)
###############################################################################
+# DCN 36
+###############################################################################
+IRQ_DCN36 = irq_service_dcn36.o
+
+AMD_DAL_IRQ_DCN36= $(addprefix $(AMDDALPATH)/dc/irq/dcn36/,$(IRQ_DCN36))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN36)
+
+###############################################################################
# DCN 401
###############################################################################
IRQ_DCN401 = irq_service_dcn401.o
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 953f4a4dacad..33ce470e4c88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
index 2c72074310c7..d777b85e70da 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -46,36 +46,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 49317934ef4f..3a9163acb49b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -37,36 +37,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 9ca28565a9d1..4ce9edd16344 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 916f0c974637..5847af0e66cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 1d61d475d36f..6417011d2246 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 42cdfe6c3538..71d2f065140b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index a443a8abb1ea..2a4080bdcf6b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 8ffc7e2c681a..624f1ac309f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 262bb8b74b15..137caffae916 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 53e78ae7eecf..921cb167d920 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index e0563e880432..0118fd6e5db0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 2ef22299101a..adebfc888618 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index f0ac0aeeac51..e9e315c75d76 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
-}
-
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3)
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index ea8c271171bc..79e5e8c137ca 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index 7ec8e0de2f01..163b8ee9ebf7 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
new file mode 100644
index 000000000000..f716ab0fd30e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#include "irq_service_dcn36.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+static enum dc_irq_source to_dal_irq_source_dcn36(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
+ return DC_IRQ_SOURCE_DMCUB_OUTBOX;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd0_ack
+};
+
+static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_DMUB(reg_name)\
+ BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\
+ REG_STRUCT[base + reg_num].enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[0] = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[1] = \
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
+ REG_STRUCT[base + reg_num].ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base + reg_num].ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\
+ REG_STRUCT[base].enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[0] = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[1] = \
+ ~reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
+ REG_STRUCT[base].ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base].ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
+
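+/* The *_int_entry() macros below expand IRQ_REG_ENTRY() into per-instance
+ * table entries. For illustration (assumed expansion, not generated code):
+ * hpd_int_entry(0) fills REG_STRUCT[DC_IRQ_SOURCE_HPD1 + 0] with
+ * regHPD0_DC_HPD_INT_CONTROL as both enable and ack register, the
+ * DC_HPD_INT_EN / DC_HPD_INT_ACK field masks as enable/ack masks,
+ * regHPD0_DC_HPD_INT_STATUS as status register and hpd_irq_info_funcs
+ * (hpd0_ack) as the ack callback.
+ */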
+#define hpd_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+
+#define hpd_rx_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\
+
+#define pflip_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the semantics
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\
+
+#define vblank_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\
+
+#define vline0_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\
+
+#define dmub_outbox_int_entry()\
+ IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \
+ DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs
+
+#define dummy_irq_entry(irqno) \
+ REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\
+
+#define i2c_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num)
+
+#define dp_sink_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num)
+
+#define gpio_pad_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num)
+
+#define dc_underflow_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW)
+
+static struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+#define dcn36_irq_init_part_1() {\
+ dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \
+ hpd_int_entry(0); \
+ hpd_int_entry(1); \
+ hpd_int_entry(2); \
+ hpd_int_entry(3); \
+ hpd_int_entry(4); \
+ hpd_rx_int_entry(0); \
+ hpd_rx_int_entry(1); \
+ hpd_rx_int_entry(2); \
+ hpd_rx_int_entry(3); \
+ hpd_rx_int_entry(4); \
+ i2c_int_entry(1); \
+ i2c_int_entry(2); \
+ i2c_int_entry(3); \
+ i2c_int_entry(4); \
+ i2c_int_entry(5); \
+ i2c_int_entry(6); \
+ dp_sink_int_entry(1); \
+ dp_sink_int_entry(2); \
+ dp_sink_int_entry(3); \
+ dp_sink_int_entry(4); \
+ dp_sink_int_entry(5); \
+ dp_sink_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \
+ pflip_int_entry(0); \
+ pflip_int_entry(1); \
+ pflip_int_entry(2); \
+ pflip_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \
+ gpio_pad_int_entry(0); \
+ gpio_pad_int_entry(1); \
+ gpio_pad_int_entry(2); \
+ gpio_pad_int_entry(3); \
+ gpio_pad_int_entry(4); \
+ gpio_pad_int_entry(5); \
+ gpio_pad_int_entry(6); \
+ gpio_pad_int_entry(7); \
+ gpio_pad_int_entry(8); \
+ gpio_pad_int_entry(9); \
+ gpio_pad_int_entry(10); \
+ gpio_pad_int_entry(11); \
+ gpio_pad_int_entry(12); \
+ gpio_pad_int_entry(13); \
+ gpio_pad_int_entry(14); \
+ gpio_pad_int_entry(15); \
+ gpio_pad_int_entry(16); \
+ gpio_pad_int_entry(17); \
+ gpio_pad_int_entry(18); \
+ gpio_pad_int_entry(19); \
+ gpio_pad_int_entry(20); \
+ gpio_pad_int_entry(21); \
+ gpio_pad_int_entry(22); \
+ gpio_pad_int_entry(23); \
+ gpio_pad_int_entry(24); \
+ gpio_pad_int_entry(25); \
+ gpio_pad_int_entry(26); \
+ gpio_pad_int_entry(27); \
+ gpio_pad_int_entry(28); \
+ gpio_pad_int_entry(29); \
+ gpio_pad_int_entry(30); \
+ dc_underflow_int_entry(1); \
+ dc_underflow_int_entry(2); \
+ dc_underflow_int_entry(3); \
+ dc_underflow_int_entry(4); \
+ dc_underflow_int_entry(5); \
+ dc_underflow_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \
+ dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \
+}
+
+#define dcn36_irq_init_part_2() {\
+ vupdate_no_lock_int_entry(0); \
+ vupdate_no_lock_int_entry(1); \
+ vupdate_no_lock_int_entry(2); \
+ vupdate_no_lock_int_entry(3); \
+ vblank_int_entry(0); \
+ vblank_int_entry(1); \
+ vblank_int_entry(2); \
+ vblank_int_entry(3); \
+ vline0_int_entry(0); \
+ vline0_int_entry(1); \
+ vline0_int_entry(2); \
+ vline0_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \
+ dmub_outbox_int_entry(); \
+}
+
+#define dcn36_irq_init() {\
+ dcn36_irq_init_part_1(); \
+ dcn36_irq_init_part_2(); \
+}
+
+static struct irq_source_info irq_source_info_dcn36[DAL_IRQ_SOURCES_NUMBER] = {0};
+
+static struct irq_service_funcs irq_service_funcs_dcn36 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn36
+};
+
+static void dcn36_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ struct dc_context *ctx = init_data->ctx;
+
+#define REG_STRUCT irq_source_info_dcn36
+ dcn36_irq_init();
+
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn36;
+ irq_service->funcs = &irq_service_funcs_dcn36;
+}
+
+struct irq_service *dal_irq_service_dcn36_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn36_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h
new file mode 100644
index 000000000000..21ff95f6562d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef __DAL_IRQ_SERVICE_DCN36_H__
+#define __DAL_IRQ_SERVICE_DCN36_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn36_create(
+ struct irq_service_init_data *init_data);
+
+#endif /* __DAL_IRQ_SERVICE_DCN36_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index b43c9524b0de..fd9bb1950c20 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index eca3d7ee7e4e..b595a11c5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+//HPD1_DC_HPD_INT_STATUS
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
+
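+/* Shared HPD ack helpers used by the per-ASIC irq services. Both acknowledge
+ * the pending HPD interrupt and then program DC_HPD_INT_POLARITY to the
+ * opposite of the currently sensed level, so the next connect or disconnect
+ * transition raises a new interrupt. hpd0_ack uses the DCN-style HPD0_*
+ * register fields defined above, hpd1_ack the legacy DC_HPD1_* fields.
+ */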
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index b178f85944cd..bbcef3d2fe33 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index e962c426beda..a2f7b933bebf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -161,6 +161,20 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DPCX_TX_PHYE,
DC_IRQ_SOURCE_DPCX_TX_PHYF,
+ DC_IRQ_SOURCE_DC1_VLINE2,
+ DC_IRQ_SOURCE_DC2_VLINE2,
+ DC_IRQ_SOURCE_DC3_VLINE2,
+ DC_IRQ_SOURCE_DC4_VLINE2,
+ DC_IRQ_SOURCE_DC5_VLINE2,
+ DC_IRQ_SOURCE_DC6_VLINE2,
+
+ DC_IRQ_SOURCE_DCI2C_RR_DDC1,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC2,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC3,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC4,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC5,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC6,
+
DAL_IRQ_SOURCES_NUMBER
};
@@ -170,6 +184,9 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1,
+ IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2,
+ IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index ff8fe1a94965..1045c268672e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "clk_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -67,10 +68,20 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ struct dc_stream_update stream_update = { 0 };
+ bool dpms_off = false;
+ bool needs_divider_update = false;
bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
bool is_hpo_acquired;
uint8_t count;
int i;
+ struct audio_output audio_output[MAX_PIPES];
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
+
+ needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
+ link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
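+
+	/* A clock-divider/encoder update is only needed when the requested link
+	 * settings switch the DP encoding (8b/10b vs. 128b/132b); in that case
+	 * the affected pipes' pixel clocks and audio outputs are reprogrammed
+	 * below for the new encoding.
+	 */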
udelay(100);
@@ -83,16 +94,66 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
link->dc,
state,
pipes[i]);
+
+ // Disable OTG and re-enable after updating clocks
+ pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg);
}
- if (link->dc->hwss.setup_hpo_hw_control) {
- is_hpo_acquired = resource_is_hpo_acquired(state);
- if (was_hpo_acquired != is_hpo_acquired)
- link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
+ link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
+ link_setting, count,
+ *pipes, &audio_output[0]);
+ for (i = 0; i < count; i++) {
+ pipes[i]->clock_source->funcs->program_pix_clk(
+ pipes[i]->clock_source,
+ &pipes[i]->stream_res.pix_clk_params,
+ link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings),
+ &pipes[i]->pll_settings);
+
+ if (pipes[i]->stream_res.audio != NULL) {
+ const struct link_hwss *link_hwss = get_link_hwss(
+ link, &pipes[i]->link_res);
+
+ link_hwss->setup_audio_output(pipes[i], &audio_output[i],
+ pipes[i]->stream_res.audio->inst);
+
+ pipes[i]->stream_res.audio->funcs->az_configure(
+ pipes[i]->stream_res.audio,
+ pipes[i]->stream->signal,
+ &audio_output[i].crtc_info,
+ &pipes[i]->stream->audio_info,
+ &audio_output[i].dp_link_info);
+
+ if (link->dc->config.disable_hbr_audio_dp2 &&
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
+ link->dc->link_srv->dp_is_128b_132b_signal(pipes[i]))
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio);
+ }
+ }
}
- for (i = count-1; i >= 0; i--)
- link_set_dpms_on(state, pipes[i]);
+ // Toggle on HPO I/O if necessary
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+
+ for (i = 0; i < count; i++)
+ pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
+
+ // Set DPMS on with stream update
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
+ stream_update.dpms_off = &dpms_off;
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
+ }
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
@@ -251,7 +312,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
@@ -646,7 +707,7 @@ bool dp_set_test_pattern(
if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
@@ -816,7 +877,7 @@ bool dp_set_test_pattern(
return false;
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -864,7 +925,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index eae23ea7f6ec..033650cdb811 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
-#include "link.h"
+#include "link_service.h"
void dp_handle_automated_test(struct dc_link *link);
bool dp_set_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index ab437a0c9101..9ff4a6c46a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
-#include "link.h"
+#include "link_service.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 3e47a6735912..befa67b2b2ae 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -48,11 +48,19 @@ void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
@@ -71,9 +79,16 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
if (!stream_enc)
return;
@@ -84,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (stream_enc->funcs->enable_stream)
stream_enc->funcs->enable_stream(stream_enc,
pipe_ctx->stream->signal, false);
- link_enc->funcs->connect_dig_be_to_fe(
- link_enc,
- pipe_ctx->stream_res.stream_enc->id,
- false);
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
pipe_ctx->stream->link,
@@ -101,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- if (!dc_is_virtual_signal(stream->signal))
+ if (!dc_is_virtual_signal(stream->signal) &&
+ !dc_is_rgb_signal(stream->signal))
stream_encoder->funcs->setup_stereo_sync(
stream_encoder,
pipe_ctx->stream_res.tg->inst,
@@ -124,8 +143,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
stream_encoder->funcs->dvi_set_stream_attribute(
stream_encoder,
&stream->timing,
- (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK);
else if (dc_is_lvds_signal(stream->signal))
stream_encoder->funcs->lvds_set_stream_attribute(
stream_encoder,
@@ -142,7 +160,14 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
if (dc_is_dp_sst_signal(signal))
link_enc->funcs->enable_dp_output(
@@ -162,7 +187,14 @@ void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->disable_output(link_enc, signal);
link->dc->link_srv->dp_trace_source_sequence(link,
@@ -173,7 +205,14 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
@@ -184,7 +223,14 @@ void set_dio_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
@@ -193,9 +239,15 @@ void update_dio_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
- ASSERT(link_enc);
link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
}
@@ -280,7 +332,10 @@ static const struct link_hwss dio_link_hwss = {
bool can_use_dio_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->link_enc != NULL;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->link_enc != NULL;
+ else
+ return link_res->dio_link_enc != NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 45f0e091fcb0..4a25210a344f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -27,7 +27,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index 348ea4cb832d..e1dff4e3f446 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -127,7 +127,10 @@ static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
link, link_res, tp_params, get_dio_link_hwss())) {
@@ -187,7 +190,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
+ return ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
index 9ac08a332540..cf578a8662a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
-#include "link.h"
+#include "link_service.h"
uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 6499807af72a..81bf3c5e1fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -35,12 +35,15 @@ static void update_dpia_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
static enum dc_status status;
uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
int i;
DC_LOGGER_INIT(link->ctx->logger);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
for (i = 0; i < table->stream_count; i++)
mst_alloc_slots += table->stream_allocations[i].slot_count;
@@ -61,7 +64,10 @@ static void set_dio_dpia_link_test_pattern(struct dc_link *link,
if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE)
return;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!link_enc)
return;
@@ -77,17 +83,78 @@ static void set_dio_dpia_lane_settings(struct dc_link *link,
{
}
+static void enable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
+ uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ if (link_enc->funcs->enable_dpia_output)
+ link_enc->funcs->enable_dpia_output(
+ link_enc,
+ link_settings,
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
+ else
+ DC_LOG_ERROR("%s: link encoder does not support enable_dpia_output\n", __func__);
+ } else
+ enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
+
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+static void disable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ if (link_enc->funcs->disable_dpia_output)
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ else
+ DC_LOG_ERROR("%s: link encoder does not support disable_dpia_output\n", __func__);
+ } else
+ link_enc->funcs->disable_output(link_enc, signal);
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
- .disable_link_output = disable_dio_link_output,
+ .disable_link_output = disable_dpia_link_output,
.setup_audio_output = setup_dio_audio_output,
.enable_audio_packet = enable_dio_audio_packet,
.disable_audio_packet = disable_dio_audio_packet,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
- .enable_dp_link_output = enable_dio_dp_link_output,
+ .enable_dp_link_output = enable_dpia_link_output,
.set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
.set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
@@ -97,8 +164,10 @@ static const struct link_hwss dpia_link_hwss = {
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign;
+ else
+ return link->is_dig_mapping_flexible && link_res->dio_link_enc != NULL;
}
const struct link_hwss *get_dpia_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..259e0f4775e1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
@@ -27,6 +27,9 @@
#include "link_hwss.h"
+#define DIG_SST_MODE 0
+#define DIG_MST_MODE 5
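+
+/* digmode values passed to enable_dpia_output()/disable_dpia_output() to
+ * select SST vs. MST operation for the DIG; the numeric values are assumed
+ * here to mirror the hardware DIG_MODE field encoding (0 = DP SST, 5 = DP MST).
+ */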
+
const struct link_hwss *get_dpia_link_hwss(void);
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 1d3ed8ca83b5..7c9005bc2587 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,7 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 116ff37126e7..55c5148de800 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- uint8_t clk_src = 0x4C;
+ uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
index 82301187bc7c..8bf36827ecfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
-#include "link.h"
+#include "link_service.h"
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index e026c728042a..6d31f4967f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service,
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
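+	/* SCDC is only present on HDMI 2.0+ sinks; skip the DDC reads below when
+	 * the parsed EDID reports no SCDC support.
+	 */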
+ if (ddc_service->link->local_sink &&
+ !ddc_service->link->local_sink->edid_caps.scdc_present)
+ return;
+
link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
sizeof(sink->scdc_caps.manufacturer_OUI.byte));
@@ -593,8 +597,9 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
+ if (!detect_dp_sink_caps(link)) {
return false;
+ }
if (is_dp_branch_device(link))
/* DP SST branch */
@@ -611,6 +616,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -654,7 +660,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+ DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index);
if (is_in_alt_mode)
return true;
@@ -816,7 +822,10 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
{
bool destrictive = false;
struct dc_link_settings max_link_cap;
- bool is_link_enc_unavailable = link->link_enc &&
+ bool is_link_enc_unavailable = false;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ is_link_enc_unavailable = link->link_enc &&
link->dc->res_pool->funcs->link_encs_assign &&
!link_enc_cfg_is_link_enc_avail(
link->ctx->dc,
@@ -829,7 +838,8 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
if (link->dc->debug.skip_detection_link_training ||
dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)) {
destrictive = false;
} else if (link_dp_get_encoding_format(&max_link_cap) ==
DP_8b_10b_ENCODING) {
@@ -852,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
verify_link_capability_non_destructive(link);
}
+/**
+ * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable.
+ *
+ * Evaluates an 8-byte EDID header to decide whether it looks valid enough
+ * to conclude that a display is connected, without reading the full EDID.
+ *
+ * @edid_header: The first 8 bytes of the EDID read from DDC.
+ *
+ * Return: true if the header looks valid (>= 6 of 8 bytes match the
+ * expected 00/FF pattern), false otherwise.
+ */
+static bool link_detect_evaluate_edid_header(uint8_t edid_header[8])
+{
+ int edid_header_score = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
+
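+	/* A pristine header reads 00 FF FF FF FF FF FF 00 and scores 8; requiring
+	 * only >= 6 matching bytes tolerates a couple of read errors on DDC.
+	 */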
+ return edid_header_score >= 6;
+}
+
+/**
+ * link_detect_ddc_probe() - Probe the DDC to see if a display is connected.
+ *
+ * Detect whether a display is connected on the DDC line without reading the
+ * full EDID. Reads only the EDID header (the first 8 bytes) over DDC and
+ * checks whether it matches the expected pattern.
+ *
+ * @link: DC link whose DDC/I2C is probed for the EDID header.
+ *
+ * Return: true if the EDID header was read and passes validation,
+ * false otherwise.
+ */
+static bool link_detect_ddc_probe(struct dc_link *link)
+{
+ if (!link->ddc)
+ return false;
+
+ uint8_t edid_header[8] = {0};
+ bool ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header));
+
+ if (!ddc_probed)
+ return false;
+
+ if (!link_detect_evaluate_edid_header(edid_header))
+ return false;
+
+ return true;
+}
+
+/**
+ * link_detect_dac_load_detect() - Perform DAC load detection.
+ *
+ * Load detection can be used to detect the presence of an
+ * analog display when we can't read DDC. This causes a visible
+ * glitch, so it should be used sparingly.
+ *
+ * @link: DC link to test using the DAC load-detect path.
+ *
+ * Return: true if the VBIOS load-detect call reports OK, false
+ * otherwise.
+ */
+static bool link_detect_dac_load_detect(struct dc_link *link)
+{
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct link_encoder *link_enc = link->link_enc;
+ enum engine_id engine_id = link_enc->preferred_engine;
+ enum dal_device_type device_type = DEVICE_TYPE_CRT;
+ enum bp_result bp_result;
+ uint32_t enum_id;
+
+ switch (engine_id) {
+ case ENGINE_ID_DACB:
+ enum_id = 2;
+ break;
+ case ENGINE_ID_DACA:
+ default:
+ engine_id = ENGINE_ID_DACA;
+ enum_id = 1;
+ break;
+ }
+
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ return bp_result == BP_RESULT_OK;
+}
+
/*
* detect_link_and_local_sink() - Detect if a sink is attached to a given link
*
@@ -936,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ case SIGNAL_TYPE_RGB: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_RGB;
+ break;
+ }
+
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
@@ -1003,21 +1107,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
- /*
- * If this is DP over USB4 link then we need to:
- * - Enable BW ALLOC support on DPtx if applicable
- */
- if (dc->config.usb4_bw_alloc_support) {
- if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
- /* update with non reduced link cap if bw allocation mode is supported */
- if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
- link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate =
- link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count =
- link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
- }
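+	/* Enable USB4 DP bandwidth allocation mode only when the DP tunnel, the
+	 * DPIA and the driver all report support; clear the cap bit on failure so
+	 * later checks treat bandwidth allocation as unsupported.
+	 */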
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}
@@ -1070,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link,
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
+ /* Analog connectors without EDID:
+ * - old monitor that actually doesn't have EDID
+ * - cheap DVI-A cable or adapter that doesn't connect DDC
+ */
+ if (dc_connector_supports_analog(link->link_id.id)) {
+ /* If we didn't do DAC load detection yet, do it now
+ * to verify there really is a display connected.
+ */
+ if (link->type != dc_connection_dac_load &&
+ !link_detect_dac_load_detect(link)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+ return false;
+ }
+
+ DC_LOG_INFO("%s detected analog display without EDID\n", __func__);
+ link->type = dc_connection_dac_load;
+ sink->edid_caps.analog = true;
+ break;
+ }
+
DC_LOG_ERROR("No EDID read.\n");
+
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -1137,13 +1254,26 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
- query_hdcp_capability(sink->sink_signal, link);
+
+ if (!sink->edid_caps.analog)
+ query_hdcp_capability(sink->sink_signal, link);
}
+ /* DVI-I connector connected to analog display. */
+ if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ sink->edid_caps.analog)
+ sink->sink_signal = SIGNAL_TYPE_RGB;
+
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else if (dc_is_dvi_signal(sink->sink_signal) &&
+ dc_is_dvi_signal(link->connector_signal) &&
+ aud_support->hdmi_audio_native &&
+ sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
@@ -1232,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
+/**
+ * link_detect_analog() - Determine whether an analog sink is connected.
+ *
+ * @link: DC link to evaluate (must support analog signalling).
+ * @type: Updated with the detected connection type:
+ * dc_connection_single (analog via DDC),
+ * dc_connection_dac_load (via load-detect),
+ * or dc_connection_none.
+ *
+ * Return: true if detection completed.
+ */
+static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type)
+{
+	/* This must only be called for connectors that support an analog signal. */
+ ASSERT(dc_connector_supports_analog(link->link_id.id));
+
+ if (link_detect_ddc_probe(link)) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link_detect_dac_load_detect(link)) {
+ *type = dc_connection_dac_load;
+ return true;
+ }
+
+ *type = dc_connection_none;
+ return true;
+}
+
/*
* link_detect_connection_type() - Determine if there is a sink connected
*
@@ -1248,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
return true;
}
+ /* Ignore the HPD pin (if any) for analog connectors.
+ * Instead rely on DDC and DAC.
+ *
+ * - VGA connectors don't have any HPD at all.
+ * - Some DVI-A cables don't connect the HPD pin.
+ * - Some DVI-A cables pull up the HPD pin.
+ * (So it's high even when no display is connected.)
+ */
+ if (dc_connector_supports_analog(link->link_id.id))
+ return link_detect_analog(link, type);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
if (!link->dc->config.edp_no_power_sequencing)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 7da05078721e..1ab29476060b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
-#include "link.h"
+#include "link_service.h"
bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 60e64e0138a3..6ae134147617 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
}
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
+ (link->type != dc_connection_none))
dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -148,6 +149,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +162,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ /* The subsequent call to dc_commit_updates_for_stream for a full update
+ * will release the current state and swap to a new state. Releasing the
+ * current state results in the stream pointers in the pipe_ctx structs
+	 * being zeroed. Hence, cache all streams prior to calling dc_commit_updates_for_stream.
+ */
+ for (i = 0; i < count; i++)
+ streams[i] = pipes[i]->stream;
+
for (i = 0; i < count; i++) {
- stream_update.stream = pipes[i]->stream;
+ stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipes[i]->stream, &stream_update,
+ streams[i], &stream_update,
state);
}
@@ -652,15 +662,15 @@ static void write_i2c_redriver_setting(
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
dp_get_panel_mode(pipe_ctx->stream->link);
if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
return;
-
- link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
ASSERT(link_enc);
if (link_enc == NULL)
return;
@@ -772,6 +782,20 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
return result;
}
+static bool dp_set_hblank_reduction_on_rx(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (dc_is_virtual_signal(stream->signal))
+ result = true;
+ else
+ result = dm_helpers_dp_write_hblank_reduction(dc->ctx, stream);
+ return result;
+}
+
+
/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
* i.e. after dp_enable_dsc_on_rx() had been called
*/
@@ -808,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -817,16 +841,17 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -946,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
@@ -1910,7 +1936,7 @@ static void disable_link_dp(struct dc_link *link,
if (link_dp_get_encoding_format(&link_settings) ==
DP_8b_10b_ENCODING) {
- dp_set_fec_enable(link, false);
+ dp_set_fec_enable(link, link_res, false);
dp_set_fec_ready(link, link_res, false);
}
}
@@ -1960,8 +1986,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
eng_id = pipe_ctx->stream_res.stream_enc->id;
@@ -1972,7 +1998,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
write_i2c_default_retimer_setting(pipe_ctx,
is_vga_mode, is_over_340mhz);
}
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
}
@@ -2028,7 +2054,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int lt_attempts = LINK_TRAINING_ATTEMPTS;
// Increase retry count if attempting DP1.x on FIXED_VS link
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
lt_attempts = 10;
@@ -2043,7 +2069,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
do_fallback = true;
/*
@@ -2107,7 +2134,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
fec_enable = true;
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_enable(link, fec_enable);
+ dp_set_fec_enable(link, &pipe_ctx->link_res, fec_enable);
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2199,7 +2226,11 @@ static enum dc_status enable_link(
{
enum dc_status status = DC_ERROR_UNEXPECTED;
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
+ struct dc_link *link = NULL;
+
+ if (stream == NULL)
+ return DC_ERROR_UNEXPECTED;
+ link = stream->link;
/* There's some scenarios where driver is unloaded with display
* still enabled. When driver is reloaded, it may cause a display
@@ -2231,6 +2262,9 @@ static enum dc_status enable_link(
enable_link_lvds(pipe_ctx);
status = DC_OK;
break;
+ case SIGNAL_TYPE_RGB:
+ status = DC_OK;
+ break;
case SIGNAL_TYPE_VIRTUAL:
status = enable_link_virtual(pipe_ctx);
break;
@@ -2272,26 +2306,10 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
}
- /* get dp overhead for dp tunneling */
- link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
- if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
- if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
- DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
- __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
- link->dpia_bw_alloc_config.dp_overhead);
- } else {
- // Cannot get the required bandwidth.
- DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
- __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
- link->dpia_bw_alloc_config.dp_overhead);
- return false;
- }
- } else {
- DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
- return false;
- }
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
int i = 0;
@@ -2349,9 +2367,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal, link->link_index);
}
}
@@ -2365,7 +2383,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2379,13 +2397,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
//Need to inform that sink is going to use legacy HDMI mode.
write_scdc_data(
link->ddc,
165000,//vbios only handles 165Mhz.
false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
write_i2c_retimer_setting(pipe_ctx,
@@ -2393,7 +2411,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
else
write_i2c_default_retimer_setting(pipe_ctx,
false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, false);
}
@@ -2433,7 +2451,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
- /* since current psp not loaded, we need to reset it to default*/
+ /* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2446,7 +2464,7 @@ void link_set_dpms_on(
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
enum dc_status status;
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -2465,13 +2483,15 @@ void link_set_dpms_on(
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal,
+ link->link_index);
}
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
@@ -2527,12 +2547,29 @@ void link_set_dpms_on(
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
update_psp_stream_config(pipe_ctx, false);
+
+ if (link->is_dds) {
+ uint32_t post_oui_delay = 30; // 30ms
+
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ }
+
return;
}
if (pipe_ctx->stream->dpms_off)
return;
+ /* For a DP tunneling link, a pending HPD means that we have a race condition between processing
+ * current link and processing the pending HPD. If we enable the link now, we may end up with a
+ * link that is not actually connected to a sink. So we skip enabling the link in this case.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending) {
+ DC_LOG_DEBUG("%s, Link%d HPD is pending, not enable it.\n", __func__, link->link_index);
+ return;
+ }
+
/* Have to setup DSC before DIG FE and BE are connected (which happens before the
* link training). This is to make sure the bandwidth sent to DIG BE won't be
* bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
@@ -2598,7 +2635,10 @@ void link_set_dpms_on(
}
}
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_set_hblank_reduction_on_rx(pipe_ctx);
+
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2607,6 +2647,15 @@ void link_set_dpms_on(
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
+ /* Corruption was observed on systems with display mux when stream gets
+ * enabled after the mux switch. Having a small delay between link
+ * training and stream unblank resolves the corruption issue.
+ * This is a workaround.
+ */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ link->is_display_mux_present)
+ msleep(20);
+
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 9398f9c1666a..bd6fc63064a3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DPMS_H__
#define __DC_LINK_DPMS_H__
-#include "link.h"
+#include "link_service.h"
void link_set_dpms_on(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 5e1b5ab9fbc6..a6e2b0821969 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -100,7 +100,8 @@ static void construct_link_service_validation(struct link_service *link_srv)
{
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
- link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth;
+ link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
/* link dpms owns the programming sequence of stream's dpms state associated
@@ -155,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;
@@ -163,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
@@ -174,7 +178,6 @@ static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv)
{
link_srv->dpia_handle_usb4_bandwidth_allocation_for_link =
dpia_handle_usb4_bandwidth_allocation_for_link;
- link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response;
link_srv->dp_set_drive_settings = dp_set_drive_settings;
link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl;
}
@@ -448,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
+static enum engine_id find_analog_engine(struct dc_link *link)
+{
+ struct dc_bios *bp = link->ctx->dc_bios;
+ struct graphics_object_id encoder = {0};
+ enum bp_result bp_result = BP_RESULT_OK;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder);
+
+ if (bp_result != BP_RESULT_OK)
+ return ENGINE_ID_UNKNOWN;
+
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENGINE_ID_DACA;
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENGINE_ID_DACB;
+ }
+ }
+
+ return ENGINE_ID_UNKNOWN;
+}
+
+static bool transmitter_supported(const enum transmitter transmitter)
+{
+ return transmitter != TRANSMITTER_UNKNOWN &&
+ transmitter != TRANSMITTER_NUTMEG_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_LCD;
+}
+
+static bool analog_engine_supported(const enum engine_id engine_id)
+{
+ return engine_id == ENGINE_ID_DACA ||
+ engine_id == ENGINE_ID_DACB;
+}
+
static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -464,6 +507,7 @@ static bool construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_read_request = DC_IRQ_SOURCE_INVALID;
link->link_status.dpcd_caps = &link->dpcd_caps;
link->dc = init_params->dc;
@@ -478,10 +522,23 @@ static bool construct_phy(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ /* Determine early if the link has any supported encoders,
+ * so that we avoid initializing DDC and HPD, etc.
+ */
+ bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
+ enc_init_data.analog_engine = find_analog_engine(link);
+
link->ep_type = DISPLAY_ENDPOINT_PHY;
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+ if (!transmitter_supported(enc_init_data.transmitter) &&
+ !analog_engine_supported(enc_init_data.analog_engine)) {
+ DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
+ goto unsupported_fail;
+ }
+
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
@@ -514,6 +571,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_HDMI_TYPE_A:
link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ if (link->hpd_gpio)
+ link->irq_source_read_request =
+ dal_irq_get_read_request(link->hpd_gpio);
break;
case CONNECTOR_ID_SINGLE_LINK_DVID:
case CONNECTOR_ID_SINGLE_LINK_DVII:
@@ -523,6 +583,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_DUAL_LINK_DVII:
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
+ case CONNECTOR_ID_VGA:
+ link->connector_signal = SIGNAL_TYPE_RGB;
+ break;
case CONNECTOR_ID_DISPLAY_PORT:
case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
@@ -534,10 +597,16 @@ static bool construct_phy(struct dc_link *link,
break;
case CONNECTOR_ID_EDP:
+ // If smartmux is supported, only create the link on the primary eDP.
+ // Dual eDP is not supported with smartmux.
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0))
+ goto create_fail;
+
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
+ if (!link->dc->config.allow_edp_hotplug_detection
+ && !is_smartmux_suported(link))
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
@@ -598,16 +667,12 @@ static bool construct_phy(struct dc_link *link,
dal_ddc_get_line(get_ddc_pin(link->ddc));
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
link->hpd_src = enc_init_data.hpd_source;
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
@@ -653,7 +718,7 @@ static bool construct_phy(struct dc_link *link,
}
/* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
+ * CRT for rgb, LCD for other supported signal types
*/
if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
link->device_tag.dev_id))
@@ -698,7 +763,7 @@ static bool construct_phy(struct dc_link *link,
link->chip_caps);
}
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
link->bios_forced_drive_settings.VOLTAGE_SWING =
(bios->integrated_info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
link->bios_forced_drive_settings.PRE_EMPHASIS =
@@ -722,6 +787,7 @@ static bool construct_phy(struct dc_link *link,
link->psr_settings.psr_vtotal_control_support = false;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
@@ -740,6 +806,7 @@ create_fail:
link->hpd_gpio = NULL;
}
+unsupported_fail:
DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
return false;
}
@@ -803,9 +870,7 @@ static bool construct_dpia(struct dc_link *link,
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index e96220d48d03..aad36ca1a31c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
-#include "link.h"
+#include "link_service.h"
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 1907bda3cb6e..f7aa3bc3a93a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
-#include "link.h"
+#include "link_service.h"
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 60f15a9ba7a5..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing(
if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
return false;
break;
+ case PIXEL_ENCODING_UNDEFINED:
+ /* This pixel encoding is currently not supported */
+ ASSERT(false);
+ break;
default:
/* Invalid Pixel Encoding*/
return false;
@@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
break;
+ case COLOR_DEPTH_UNDEFINED:
+ /* This color depth is currently not supported */
+ ASSERT(false);
+ break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
+static uint32_t dp_get_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing,
+ const struct dc_link *link)
+{
+ return dc_bandwidth_in_kbps_from_timing(timing,
+ dc_link_get_highest_encoding_format(link));
+}
+
static bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -351,61 +367,260 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+ const struct dc_tunnel_settings *dp_tunnel_settings = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) {
+ dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings;
+ break;
+ }
+ }
+
+ return dp_tunnel_settings;
+}
+
/*
- * This function calculates the bandwidth required for the stream timing
- * and aggregates the stream bandwidth for the respective dpia link
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * @stream: pointer to the dc_stream_state struct instance
- * @num_streams: number of streams to be validated
- *
- * return: true if validation is succeeded
+ * return: dc_status
*/
-bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- int bw_needed[MAX_DPIA_NUM] = {0};
- struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
- int num_dpias = 0;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
- /* new dpia sst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
+ struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
+ uint8_t link_count = 0;
+ enum dc_status result = DC_OK;
- dpia_link[num_dpias] = stream[i].link;
- bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
- num_dpias++;
- } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- uint8_t j = 0;
- /* check whether its a known dpia link */
- for (; j < num_dpias; ++j) {
- if (dpia_link[j] == stream[i].link)
- break;
+ // Iterate through streams in the new context
+ for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) {
+ const struct dc_stream_state *stream = new_ctx->streams[i];
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *dp_tunnel_settings;
+ uint32_t timing_bw;
+
+ if (stream == NULL)
+ continue;
+
+ link = stream->link;
+
+ if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if ((link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) && (link->hpd_status == false))
+ continue;
+
+ dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
+
+ if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false))
+ continue;
+
+ timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link);
+
+ // Find an existing entry for this 'link' in 'dpia_link_sets'
+ for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) {
+ bool is_new_slot = false;
+
+ if (dpia_link_sets[j].link == NULL) {
+ is_new_slot = true;
+ link_count++;
+ dpia_link_sets[j].required_bw = 0;
+ dpia_link_sets[j].link = link;
}
- if (j == num_dpias) {
- /* new dpia mst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
- else {
- dpia_link[j] = stream[i].link;
- num_dpias++;
- }
+ if (is_new_slot || (dpia_link_sets[j].link == link)) {
+ dpia_link_sets[j].tunnel_settings = dp_tunnel_settings;
+ dpia_link_sets[j].required_bw += timing_bw;
+ break;
}
+ }
+ }
+
+ if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false)
+ result = DC_FAIL_DP_TUNNEL_BW_VALIDATE;
- bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[j]));
+ return result;
+}
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ memset(output, 0, sizeof(struct dp_audio_layout_config));
+
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
}
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
+
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
}
- /* Include dp overheads */
- for (uint8_t i = 0; i < num_dpias; ++i) {
- int dp_overhead = 0;
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_overhead_hblank_bw_in_symbols(
+ uint32_t max_slice_h)
+{
+ uint32_t overhead_hblank_bw = 0; /* in stream symbols */
+
+ overhead_hblank_bw += max_slice_h * 4; /* EOC overhead */
+ overhead_hblank_bw += 12; /* Main link overhead (VBID, BS/BE) */
+
+ return overhead_hblank_bw;
+}
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ /* Main logic from dce_audio is duplicated here, with the main
+ * differences being:
+ * - Pre-determined lane count of 4
+ * - Assumed 16 dsc slices for worst case
+ * - Assumed SDP split disabled for worst case
+ * TODO: Unify logic from dce_audio to prevent duplicated logic.
+ */
- dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
- bw_needed[i] += dp_overhead;
+ const struct dc_crtc_timing *timing = audio_params->crtc_timing;
+ const uint32_t channel_count = audio_params->channel_count;
+ const uint32_t sample_rate_hz = audio_params->sample_rate_hz;
+ const enum dp_link_encoding link_encoding = audio_params->link_encoding;
+
+ // 8b/10b MST and 128b/132b are always 4 logical lanes.
+ const uint32_t lane_count = 4;
+ const bool is_mst = (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT);
+ // Maximum slice count is with ODM 4:1, 4 slices per DSC
+ const uint32_t max_slices_h = 16;
+
+ const uint32_t av_stream_map_lane_count = get_av_stream_map_lane_count(
+ link_encoding, lane_count, is_mst);
+ const uint32_t audio_sdp_overhead = get_audio_sdp_overhead(
+ link_encoding, lane_count, is_mst);
+ struct dp_audio_layout_config layout_config;
+
+ if (link_encoding == DP_8b_10b_ENCODING && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return 0;
+
+ get_audio_layout_config(
+ channel_count, link_encoding, &layout_config);
+
+ /* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ timing->pix_clk_100hz, (long long)timing->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+ uint32_t required_bytes_per_hblank = 0;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config.layouts_per_sample_denom);
+ // HBlank expansion usage assumes SDP split disabled to allow for worst case.
+ layouts_per_line = dc_fixpt_from_int(dc_fixpt_ceil(layouts_per_line));
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config.max_layouts_per_audio_sdp * layout_config.symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config.max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config.symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
}
- return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ required_symbols_per_hblank += calculate_overhead_hblank_bw_in_symbols(max_slices_h);
+
+ if (link_encoding == DP_8b_10b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank; // 8 bits per 8b/10b symbol
+ else if (link_encoding == DP_128b_132b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank * 4; // 32 bits per 128b/132b symbol
+
+ return required_bytes_per_hblank;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 595fb05946e9..595774e76453 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -24,17 +24,22 @@
*/
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
-#include "link.h"
+#include "link_service.h"
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing);
-bool link_validate_dpia_bandwidth(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+enum dc_status link_validate_dp_tunnel_bandwidth(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index d6d5bbf2108c..5d2bcce2f669 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -505,7 +505,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((ddc->link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
@@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service,
/*Lower than 340 Scramble bit from SCDC caps*/
if (ddc_service->link->local_sink &&
- ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite ||
+ !ddc_service->link->local_sink->edid_caps.scdc_present))
return;
link_query_ddc_data(ddc_service, slave_address, &offset,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index a3e25e55bed6..d3e6f01a6a90 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DDC_SERVICE_H__
#define __DAL_DDC_SERVICE_H__
-#include "link.h"
+#include "link_service.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index ce9d799d2386..ad90a0106938 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
+{
+ /* Calculate offset for LTTPR closest to DPTX which is highest in the chain
+ * Offset is 0 for single LTTPR cases as base LTTPR DPCD addresses target LTTPR 1
+ */
+ return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
+}
+
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -250,21 +258,23 @@ static uint32_t intersect_frl_link_bw_support(
{
uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
+ /* Skip checking the FRL_MODE bit, as certain PCONs will clear
+ * it despite supporting the link BW indicated in the other bits.
+ */
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ else if (hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)
+ supported_bw_in_kbps = 0; /* This case should only get hit in regulated autonomous mode. */
return supported_bw_in_kbps;
}
@@ -330,9 +340,12 @@ bool dp_is_fec_supported(const struct dc_link *link)
/* TODO - use asic cap instead of link_enc->features
* we no longer know which link enc to use for this link before commit
*/
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
return (dc_is_dp_signal(link->connector_signal) && link_enc &&
@@ -344,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
- if (link->fec_state == dc_link_fec_enabled)
+ if (link->dc->debug.disable_fec)
+ force_disable = true;
+ else if (link->fec_state == dc_link_fec_enabled)
force_disable = false;
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
@@ -372,9 +387,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
/* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
- return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ bool is_lttpr_present = (lttpr_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+ if (lttpr_count > 0 && !is_lttpr_present)
+ DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+ return is_lttpr_present;
}
/* in DP compliance test, DPR-120 may have
@@ -405,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw(
return link_rate;
}
+static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link)
+{
+ enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN;
+
+ if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX)
+ lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count > 0)
+ lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+
+ return lttpr_max_lane_count;
+}
+
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
@@ -419,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
break;
}
+ /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_link_rate > 0)
+ lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
@@ -945,6 +986,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
* TODO: add MST specific link training routine
*/
decide_mst_link_settings(link, link_setting);
+ } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+ link_setting->lane_count = LANE_COUNT_FOUR;
+ link_setting->link_rate = LINK_RATE_HIGH3;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* enable edp link optimization for DSC eDP case */
if (stream->timing.flags.DSC) {
@@ -967,9 +1011,6 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
} else {
edp_decide_link_settings(link, link_setting, req_bw);
}
- } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
- link_setting->lane_count = LANE_COUNT_FOUR;
- link_setting->link_rate = LINK_RATE_HIGH3;
} else {
decide_dp_link_settings(link, link_setting, req_bw);
}
@@ -1072,6 +1113,48 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
return DC_OK;
}
+static void read_and_intersect_post_frl_lt_status(
+ struct dc_link *link)
+{
+ union autonomous_mode_and_frl_link_status autonomous_mode_caps = {0};
+ union hdmi_tx_link_status hdmi_tx_link_status = {0};
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw = {0};
+
+ /* Check if dongle supports regulated autonomous mode. */
+ core_link_read_dpcd(link, DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS,
+ &autonomous_mode_caps.raw, sizeof(autonomous_mode_caps));
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support =
+ autonomous_mode_caps.bits.REGULATED_AUTONOMOUS_MODE_SUPPORTED;
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support) {
+ DC_LOG_DC("%s: PCON supports regulated autonomous mode.\n", __func__);
+
+ core_link_read_dpcd(link, DP_PCON_HDMI_TX_LINK_STATUS,
+ &hdmi_tx_link_status.raw, sizeof(hdmi_tx_link_status));
+ }
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support &&
+ (!hdmi_tx_link_status.bits.HDMI_TX_READY_STATUS ||
+ !hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)) {
+ DC_LOG_WARNING("%s: PCON TX link training has not finished.\n", __func__);
+
+ /* Link training not finished, ignore values from this DPCD reg. */
+ return;
+ }
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
+ }
+}
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -1160,21 +1243,12 @@ static void get_active_converter_info(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
if (link->dc->caps.dp_hdmi21_pcon_support) {
- union hdmi_encoded_link_bw hdmi_encoded_link_bw;
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
link_bw_kbps_from_raw_frl_link_rate_data(
hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
- // Intersect reported max link bw support with the supported link rate post FRL link training
- if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
- &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
- hdmi_encoded_link_bw);
- DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
- }
+ read_and_intersect_post_frl_lt_status(link);
if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
@@ -1336,6 +1410,21 @@ void dpcd_set_source_specific_data(struct dc_link *link)
struct dpcd_amd_signature amd_signature = {0};
struct dpcd_amd_device_id amd_device_id = {0};
+ if (link->is_dds) {
+ uint8_t dpcd_dp_edp_backlight_mode = 0;
+
+ /*
+ * Write 0 to bits 0:1 for dp_edp_backlight_mode_set register
+ * if platform is DDS
+ */
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ dpcd_dp_edp_backlight_mode &= ~0x3;
+
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ }
+
amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
amd_device_id.device_id_byte2 =
@@ -1458,8 +1547,8 @@ bool read_is_mst_supported(struct dc_link *link)
return false;
}
- rev.raw = 0;
- cap.raw = 0;
+ rev.raw = 0;
+ cap.raw = 0;
st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
sizeof(rev));
@@ -1491,6 +1580,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+ if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) {
+ link->dpcd_sink_ext_caps.raw = 0;
+ return false;
+ }
if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
return false;
@@ -1502,9 +1595,11 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8] = {0};
+ uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
+ uint32_t lttpr_count;
+ uint32_t closest_lttpr_offset;
/* Logic to determine LTTPR support*/
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
@@ -1552,26 +1647,54 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.alpm.raw =
+ lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ lttpr_count == 0) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
*/
DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ lttpr_count = 1;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
- if (is_lttpr_present)
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+
+ if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR
+ closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+
+ if (lttpr_count > 1) {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "Closest LTTPR To Host's IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "Closest LTTPR To Host's LTTPR Device ID: ");
+ } else {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "LTTPR IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "LTTPR Device ID: ");
+ }
+ }
+
return status;
}
@@ -1590,7 +1713,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -1633,12 +1756,13 @@ static bool retrieve_link_cap(struct dc_link *link)
}
dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ /*
+ * Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
@@ -1744,6 +1868,12 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
+ /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. */
+ if (link->dpcd_caps.is_mst_capable)
+ link->wa_flags.dp_mot_reset_segment = true;
+ else
+ link->wa_flags.dp_mot_reset_segment = false;
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -1779,6 +1909,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->test_pattern_enabled = false;
link->compliance_test_state.raw = 0;
+ link->dpcd_caps.receive_port0_cap.raw[0] =
+ dpcd_data[DP_RECEIVE_PORT_0_CAP_0 - DP_DPCD_REV];
+ link->dpcd_caps.receive_port0_cap.raw[1] =
+ dpcd_data[DP_RECEIVE_PORT_0_BUFFER_SIZE - DP_DPCD_REV];
+
/* read sink count */
core_link_read_dpcd(link,
DP_SINK_COUNT,
@@ -1957,12 +2092,15 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ core_link_read_dpcd(link,
+ DP_PANEL_REPLAY_CAPABILITY_SUPPORT,
+ &link->dpcd_caps.pr_caps_supported.raw,
+ sizeof(link->dpcd_caps.pr_caps_supported.raw));
+
/* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read DP tunneling device data failed.\n", __func__);
- }
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ DC_LOG_DP2("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
@@ -2021,13 +2159,13 @@ void detect_edp_sink_caps(struct dc_link *link)
&backlight_adj_cap, sizeof(backlight_adj_cap));
link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false;
core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
&general_edp_cap, sizeof(general_edp_cap));
link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true : false;
set_default_brightness_aux(link);
@@ -2080,18 +2218,38 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_SINK_EMISSION_RATE,
(uint8_t *)&link->dpcd_caps.edp_oled_emission_rate,
sizeof(link->dpcd_caps.edp_oled_emission_rate));
+
+ /*
+ * Read Multi-SST (Single Stream Transport) capability
+ * for eDP version 1.4 or higher.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
+ core_link_read_dpcd(
+ link,
+ DP_EDP_MSO_LINK_CAPABILITIES,
+ (uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
+ sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
+ /*
+ * Read eDP general capability 2
+ */
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2,
+ (uint8_t *)&link->dpcd_caps.dp_edp_general_cap_2,
+ sizeof(link->dpcd_caps.dp_edp_general_cap_2));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc && link_enc->funcs->get_max_link_cap) {
@@ -2117,12 +2275,16 @@ const struct dc_link_settings *dp_get_verified_link_cap(
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+ enum dc_lane_count lttpr_max_lane_count;
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
bool is_uhbr13_5_supported = true;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
/* get max link encoder capability */
@@ -2178,8 +2340,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
/* Some LTTPR devices do not report valid DPCD revisions, if so, do not take it's link cap into consideration. */
if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_lane_count = get_lttpr_max_lane_count(link);
+
+ if (lttpr_max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = lttpr_max_lane_count;
+
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
if (lttpr_max_link_rate < max_link_cap.link_rate)
@@ -2285,6 +2450,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_detect_lt_init(link);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d",
+ __func__, link->link_index,
+ known_limit_link_setting->link_rate,
+ known_limit_link_setting->lane_count);
+
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa)
apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
@@ -2307,12 +2477,25 @@ bool dp_verify_link_cap_with_retries(
} else {
link->verified_link_cap = last_verified_link_cap;
}
+
+ /* For a DP tunneling link, a pending HPD means that we have a race condition between processing
+ * the current link and processing the pending HPD. Since the training failed, we should just break
+ * the loop so that we have a chance to process the pending HPD.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending)
+ break;
+
fsleep(10 * 1000);
}
dp_trace_lt_fail_count_update(link, fail_count, true);
dp_trace_set_lt_end_timestamp(link, true);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. is_success=%d fail_count=%d",
+ __func__, link->link_index,
+ success,
+ fail_count);
+
return success;
}
@@ -2377,3 +2560,40 @@ bool dp_is_sink_present(struct dc_link *link)
return present;
}
+
+uint8_t dp_get_lttpr_count(struct dc_link *link)
+{
+ if (dp_is_lttpr_present(link))
+ return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ return 0;
+}
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ bool lttpr_present = dp_is_lttpr_present(link);
+
+ if (auxless_support == NULL || auxwake_support == NULL)
+ return;
+
+ *auxless_support = false;
+ *auxwake_support = false;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return;
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) {
+ if (lttpr_present) {
+ if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED)
+ *auxless_support = true;
+ } else
+ *auxless_support = true;
+ }
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) {
+ if (!lttpr_present)
+ *auxwake_support = true;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 8f0ce97f2362..6e17f72a752f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_CAPABILITY_H__
#define __DC_LINK_DP_CAPABILITY_H__
-#include "link.h"
+#include "link_service.h"
bool detect_dp_sink_caps(struct dc_link *link);
@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
+
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);
@@ -67,6 +70,7 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
/* Initialize output parameter lt_settings. */
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
@@ -104,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+uint8_t dp_get_lttpr_count(struct dc_link *link);
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..9b2f1a7da1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -62,6 +62,39 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
+ status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
+ dpcd_dp_tun_data, 2);
+
+ if (status != DC_OK)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw =
+ dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY];
+ }
+
+ DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
+ "DPIA_BW_Alloc_support=%d "
+ "CM_BW_Alloc_support=%d ",
+ __func__, link->link_index,
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +104,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -92,6 +118,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
@@ -119,3 +146,26 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting)
+{
+ struct dc_link *link = stream->link;
+
+ memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
+
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+ dp_tunnel_setting->should_enable_dp_tunneling =
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F;
+ dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id;
+ dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw;
+ dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw;
+ dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity;
+ }
+ }
+}
+
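/*
 * link_decide_dp_tunnel_settings() packages the per-link tunneling DPCD state
 * into a dc_tunnel_settings so that mode validation can reason about BW
 * allocation without re-reading the link. A usage sketch for a hypothetical
 * caller that builds the dc_validation_dpia_set array consumed by
 * link_dpia_validate_dp_tunnel_bandwidth(); the caller, its scratch buffers
 * and the zero required_bw are assumptions, not part of this patch:
 */
static void build_dpia_validation_sets_sketch(struct dc_stream_state **streams,
					      int stream_count,
					      struct dc_tunnel_settings *tunnel_scratch,
					      struct dc_validation_dpia_set *out_sets,
					      uint8_t *out_count)
{
	uint8_t n = 0;
	int i;

	/* caller sizes tunnel_scratch and out_sets to stream_count entries */
	for (i = 0; i < stream_count; i++) {
		link_decide_dp_tunnel_settings(streams[i], &tunnel_scratch[i]);

		if (!tunnel_scratch[i].should_enable_dp_tunneling)
			continue;

		out_sets[n].link = streams[i]->link;
		out_sets[n].tunnel_settings = &tunnel_scratch[i];
		out_sets[n].required_bw = 0; /* would come from the stream's timing BW */
		n++;
	}

	*out_count = n;
}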
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 363f45a1a964..7cd03fa4892b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -27,7 +27,7 @@
#ifndef __DC_LINK_DPIA_H__
#define __DC_LINK_DPIA_H__
-#include "link.h"
+#include "link_service.h"
/* Read tunneling device capability from DPCD and update link capability
* accordingly.
@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
+
+/* Decide the DP tunneling settings based on the DPCD capabilities
+ */
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
+
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 0f1c411523a2..c958d3f600c8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -24,7 +24,7 @@
*
*/
/*********************************************************************/
-// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
+// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
/*********************************************************************/
#include "link_dp_dpia_bw.h"
#include "link_dpcd.h"
@@ -35,8 +35,10 @@
#define Kbps_TO_Gbps (1000 * 1000)
+#define MST_TIME_SLOT_COUNT 64
+
// ------------------------------------------------------------------
-// PRIVATE FUNCTIONS
+// PRIVATE FUNCTIONS
// ------------------------------------------------------------------
/*
* Always Check the following:
@@ -44,11 +46,11 @@
* - Is HPD HIGH?
* - Is BW Allocation Support Mode enabled on DP-Tx?
*/
-static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type
- && tmp->hpd_status
- && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -60,7 +62,6 @@ static void reset_bw_alloc_struct(struct dc_link *link)
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.dp_overhead = 0;
- link->dpia_bw_alloc_config.response_ready = false;
link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
@@ -142,7 +143,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
* granuality, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
-static void init_usb4_bw_struct(struct dc_link *link)
+static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -160,78 +161,6 @@ static void init_usb4_bw_struct(struct dc_link *link)
link->dpia_bw_alloc_config.nrd_max_lane_count);
}
-static uint8_t get_lowest_dpia_index(struct dc_link *link)
-{
- const struct dc *dc_struct = link->dc;
- uint8_t idx = 0xFF;
- int i;
-
- for (i = 0; i < MAX_LINKS; ++i) {
-
- if (!dc_struct->links[i] ||
- dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- if (idx > dc_struct->links[i]->link_index) {
- idx = dc_struct->links[i]->link_index;
- break;
- }
- }
-
- return idx;
-}
-
-/*
- * Get the maximum dp tunnel banwidth of host router
- *
- * @dc: pointer to the dc struct instance
- * @hr_index: host router index
- *
- * return: host router maximum dp tunnel bandwidth
- */
-static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
-{
- uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
- uint8_t hr_index_temp = 0;
- struct dc_link *link_dpia_primary, *link_dpia_secondary;
- int total_bw = 0;
-
- for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
-
- if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
-
- if (hr_index_temp == hr_index) {
- link_dpia_primary = dc->links[i];
- link_dpia_secondary = dc->links[i + 1];
-
- /**
- * If BW allocation enabled on both DPIAs, then
- * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
- * otherwise HR BW = Estimated(bw alloc enabled dpia)
- */
- if ((link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
- (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
- total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
- link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
- } else if (link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
- } else if (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
- }
- break;
- }
- }
-
- return total_bw;
-}
-
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@@ -243,323 +172,277 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
static void dpia_bw_alloc_unplug(struct dc_link *link)
{
if (link) {
- DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+ DC_LOG_DEBUG("%s: resetting BW alloc config for link(%d)\n",
__func__, link->link_index);
reset_bw_alloc_struct(link);
}
}
-static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
{
- uint8_t requested_bw;
- uint32_t temp;
+ uint8_t request_reg_val;
+ uint32_t temp, request_bw;
- /* Error check whether request bw greater than allocated */
- if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
- DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
- __func__, link->link_index);
- req_bw = link->dpia_bw_alloc_config.estimated_bw;
+ if (link->dpia_bw_alloc_config.bw_granularity == 0) {
+ DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index);
+ return;
}
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
- requested_bw = temp / Kbps_TO_Gbps;
+ request_reg_val = temp / Kbps_TO_Gbps;
/* Always make sure to add more to account for floating points */
if (temp % Kbps_TO_Gbps)
- ++requested_bw;
+ ++request_reg_val;
- /* Error check whether requested and allocated are equal */
- req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
- DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
- __func__, link->link_index);
+ request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ if (request_bw > link->dpia_bw_alloc_config.estimated_bw) {
+ DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!",
+ __func__, link->link_index,
+ req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw);
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ request_reg_val = temp / Kbps_TO_Gbps;
+ if (temp % Kbps_TO_Gbps)
+ ++request_reg_val;
}
- link->dpia_bw_alloc_config.response_ready = false; // Reset flag
- core_link_write_dpcd(
- link,
- REQUESTED_BW,
- &requested_bw,
- sizeof(uint8_t));
-}
+ link->dpia_bw_alloc_config.allocated_bw = request_bw;
+ DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw);
-/*
- * Return the response_ready flag from dc_link struct
- *
- * @link: pointer to the dc_link struct instance
- *
- * return: response_ready flag from dc_link struct
- */
-static bool get_cm_response_ready_flag(struct dc_link *link)
-{
- return link->dpia_bw_alloc_config.response_ready;
+ core_link_write_dpcd(link, REQUESTED_BW,
+ &request_reg_val,
+ sizeof(uint8_t));
}
// ------------------------------------------------------------------
-// PUBLIC FUNCTIONS
+// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
- uint8_t response = 0,
- bw_support_dpia = 0,
- bw_support_cm = 0;
+ uint8_t val;
- if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
- goto out;
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_dpia = (response >> 7) & 1;
-
- if (core_link_read_dpcd(
- link,
- USB4_DRIVER_BW_CAPABILITY,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_cm = (response >> 7) & 1;
-
- /* Send request acknowledgment to Turn ON DPTX support */
- if (bw_support_cm && bw_support_dpia) {
-
- response = 0x80;
- if (core_link_write_dpcd(
- link,
- DPTX_BW_ALLOCATION_MODE_CONTROL,
- &response,
- sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
- } else {
- // SUCCESS Enabled DPtx BW Allocation Mode Support
- DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
- ret = true;
- init_usb4_bw_struct(link);
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ retrieve_usb4_dp_bw_allocation_info(link);
+ if (
+ link->dpia_bw_alloc_config.nrd_max_link_rate
+ && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
+
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
/*
- * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
- * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
- * to make the CM to release preallocation and update estimated BW correctly for
- * all DPIAs per host router
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
*/
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
}
- }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
-out:
return ret;
}
-void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+/*
+ * Handle DP BW allocation status register
+ *
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status DPCD register
+ *
+ * return: none
+ */
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
- int bw_needed = 0;
- int estimated = 0;
-
- if (!get_bw_alloc_proceed_flag((link)))
- return;
-
- switch (result) {
-
- case DPIA_BW_REQ_FAILED:
-
- /*
- * Ideally, we shouldn't run into this case as we always validate available
- * bandwidth and request within that limit
- */
- estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-
- DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
-
- /* Update the new Estimated BW value updated by CM */
- link->dpia_bw_alloc_config.estimated_bw = estimated;
-
- /* Allocate the previously requested bandwidth */
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
-
- /*
- * If FAIL then it is either:
- * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally
- * => get estimated and allocate that
- * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then
- * CM will have to update 0xE0023 with new ESTIMATED BW value.
- */
- break;
-
- case DPIA_BW_REQ_SUCCESS:
-
- bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-
- DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
-
- link->dpia_bw_alloc_config.allocated_bw = bw_needed;
-
- link->dpia_bw_alloc_config.response_ready = true;
- break;
-
- case DPIA_EST_BW_CHANGED:
-
- estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
+ DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
+ __func__, link->link_index);
+ }
- DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
+ if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
- link->dpia_bw_alloc_config.estimated_bw = estimated;
- break;
+ link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
+ }
- case DPIA_BW_ALLOC_CAPS_CHANGED:
+ if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
- DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
- __func__, link->link_index);
- link->dpia_bw_alloc_config.bw_alloc_enabled = false;
- break;
+ DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity);
}
-}
-int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
-{
- int ret = 0;
- uint8_t timeout = 10;
-
- if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type
- && link->dpia_bw_alloc_config.bw_alloc_enabled))
- goto out;
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
+ if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
- // If DP over USB4 then we need to check BW allocation
- link->dpia_bw_alloc_config.link_max_bw = peak_bw;
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
+ DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
+ }
- do {
- if (timeout > 0)
- timeout--;
- else
- break;
- msleep(10);
- } while (!get_cm_response_ready_flag(link));
+ core_link_write_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &status, sizeof(status));
+}
- if (!timeout)
- ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
- else if (link->dpia_bw_alloc_config.allocated_bw > 0)
- ret = link->dpia_bw_alloc_config.allocated_bw;
+/*
+ * Handle the DP Bandwidth allocation for DPIA
+ *
+ */
+void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+{
+ if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ if (peak_bw > 0) {
+ // If DP over USB4 then we need to check BW allocation
+ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+
+ link_dpia_send_bw_alloc_request(link, peak_bw);
+ } else
+ dpia_bw_alloc_unplug(link);
}
- //2. Cold Unplug
- else if (!link->hpd_status)
- dpia_bw_alloc_unplug(link);
-
-out:
- return ret;
}
-bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+
+void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
- bool ret = false;
- uint8_t timeout = 10;
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
- DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d",
__func__, link->link_index, link->hpd_status,
- link->dpia_bw_alloc_config.allocated_bw, req_bw);
+ link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.estimated_bw,
+ req_bw);
+
+ if (link_dp_is_bw_alloc_available(link))
+ link_dpia_send_bw_alloc_request(link, req_bw);
+ else
+ DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
+}
- if (!get_bw_alloc_proceed_flag(link))
- goto out;
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link)
+{
+ uint32_t link_dp_overhead = 0;
- set_usb4_req_bw_req(link, req_bw);
- do {
- if (timeout > 0)
- timeout--;
- else
- break;
- msleep(10);
- } while (!get_cm_response_ready_flag(link));
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
+ * MST overhead is 1/64 of link bandwidth (excluding any overhead)
+ */
+ const struct dc_link_settings *link_cap = dc_link_get_link_cap(link);
- if (timeout)
- ret = true;
+ if (link_cap) {
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT)
+ + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);
+ }
+ }
-out:
- DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
- return ret;
+ return link_dp_overhead;
}
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+/*
+ * Aggregates the DPIA bandwidth usage for each USB4 router, then validates
+ * that the required bandwidth stays within the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of DPIA validation sets
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeded
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
{
- bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
- uint8_t lowest_dpia_index, i, hr_index;
+ uint32_t granularity_Gbps;
+ const struct dc_link *link;
+ uint32_t link_bw_granularity;
+ uint32_t link_required_bw;
+ struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
+ uint8_t i;
+ bool is_success = true;
+ uint8_t router_count = 0;
+
+ if ((dpia_link_sets == NULL) || (count == 0))
+ return is_success;
+
+ // Iterate through each DP tunneling link (DPIA).
+ // Aggregate its bandwidth requirements onto the respective USB4 router.
+ for (i = 0; i < count; i++) {
+ link = dpia_link_sets[i].link;
+ link_required_bw = dpia_link_sets[i].required_bw;
+ const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;
+
+ if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
+ break;
- if (!num_dpias || num_dpias > MAX_DPIA_NUM)
- return ret;
+ if (link->type == dc_connection_mst_branch)
+ link_required_bw += link_dpia_get_dp_overhead(link);
- lowest_dpia_index = get_lowest_dpia_index(link[0]);
+ granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
+ link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
+ ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);
- /* get total Host Router BW with granularity for the given modes */
- for (i = 0; i < num_dpias; ++i) {
- int granularity_Gbps = 0;
- int bw_granularity = 0;
+ // Find or add the USB4 router associated with the current DPIA link
+ for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
+ if (router_sets[j].is_valid == false) {
+ router_sets[j].is_valid = true;
+ router_sets[j].cm_id = dp_tunnel_settings->cm_id;
+ router_count++;
+ }
- if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
- continue;
+ if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
+ uint32_t remaining_bw =
+ dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;
- if (link[i]->link_index < lowest_dpia_index)
- continue;
+ router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;
- granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
- bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
- ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+ if (remaining_bw > router_sets[j].remaining_bw)
+ router_sets[j].remaining_bw = remaining_bw;
- hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[hr_index] += bw_granularity;
- }
+ // Get the max estimated BW within the same CM_ID
+ if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw)
+ router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw;
- /* validate against each Host Router max BW */
- for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
- if (bw_needed_per_hr[hr_index]) {
- host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
- if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
- ret = false;
+ router_sets[j].required_bw += link_bw_granularity;
+ router_sets[j].dpia_count++;
break;
}
}
}
- return ret;
-}
+ // Validate bandwidth for each unique router found.
+ for (i = 0; i < router_count; i++) {
+ uint32_t total_bw = 0;
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
-{
- int dp_overhead = 0, link_mst_overhead = 0;
+ if (router_sets[i].is_valid == false)
+ break;
- if (!get_bw_alloc_proceed_flag((link)))
- return dp_overhead;
+ // Determine the total available bandwidth for the current router based on aggregated data
+ if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0))
+ total_bw = router_sets[i].estimated_bw;
+ else
+ total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw;
- /* if its mst link, add MTPH overhead */
- if ((link->type == dc_connection_mst_branch) &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
- * MST overhead is 1/64 of link bandwidth (excluding any overhead)
- */
- const struct dc_link_settings *link_cap =
- dc_link_get_link_cap(link);
- uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
- (uint32_t)link_cap->lane_count *
- LINK_RATE_REF_FREQ_IN_KHZ * 8;
- link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ if (router_sets[i].required_bw > total_bw) {
+ is_success = false;
+ break;
+ }
}
- /* add all the overheads */
- dp_overhead = link_mst_overhead;
-
- return dp_overhead;
+ return is_success;
}
+
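/*
 * link_dpia_send_bw_alloc_request() above converts the requested bandwidth in
 * kbps into DPCD REQUESTED_BW units of (1 Gbps / bw_granularity), rounding up
 * so the grant never falls short of the request. A worked sketch of just that
 * conversion; the 3.6 Gbps request and granularity of 4 (0.25 Gbps units) used
 * below are illustrative values:
 */
static inline uint8_t bw_request_reg_sketch(uint32_t req_bw_kbps, uint32_t granularity)
{
	uint32_t temp = req_bw_kbps * granularity;
	uint8_t reg = temp / (1000 * 1000);

	if (temp % (1000 * 1000))
		reg++;	/* round up to the next granularity unit */

	return reg;
}

/*
 * bw_request_reg_sketch(3600000, 4) == 15; the CM then grants
 * 15 * (1000000 / 4) = 3750000 kbps, the smallest multiple of 0.25 Gbps
 * that covers the 3.6 Gbps request.
 */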
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 3b6d8494f9d5..30cd8e2b9d35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,12 +26,8 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
-#include "link.h"
+#include "link_service.h"
-/* Number of Host Routers per motherboard is 2 */
-#define MAX_HR_NUM 2
-/* Number of DPIA per host router is 2 */
-#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
/*
* Host Router BW type
@@ -42,14 +38,24 @@ enum bw_type {
HOST_ROUTER_BW_INVALID,
};
+struct usb4_router_validation_set {
+ bool is_valid;
+ uint8_t cm_id;
+ uint8_t dpia_count;
+ uint32_t required_bw;
+ uint32_t allocated_bw;
+ uint32_t estimated_bw;
+ uint32_t remaining_bw;
+};
+
/*
- * Enable BW Allocation Mode Support from the DP-Tx side
+ * Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
* return: SUCCESS or FAILURE
*/
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:
@@ -59,9 +65,8 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
* @link: pointer to the dc_link struct instance
* @req_bw: Bw requested by the stream
*
- * return: true if allocated successfully
*/
-bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
/*
* Handle the USB4 BW Allocation related functionality here:
@@ -71,41 +76,37 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
- * return: allocated bw else return 0
*/
-int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
+void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
- * Handle function for when the status of the Request above is complete.
- * We will find out the result of allocating on CM and update structs.
+ * Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
*
- * return: none
+ * return: DP overheads in DP tunneling
*/
-void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link);
/*
- * Handle the validation of total BW here and confirm that the bw used by each
- * DPIA doesn't exceed available BW for each host router (HR)
+ * Handle DP BW allocation status register
*
- * @link[]: array of link pointer to all possible DPIA links
- * @bw_needed[]: bw needed for each DPIA link based on timing
- * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status register
*
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * return: none
*/
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
/*
- * Obtain all the DP overheads in dp tunneling for the dpia link
+ * Aggregates the DPIA bandwidth usage for each USB4 router and validates it against the router's capacity.
*
- * @link: pointer to the dc_link struct instance
+ * @dpia_link_sets: pointer to an array of DPIA validation sets
+ * @count: number of DPIA validation sets
*
- * return: DP overheads in DP tunneling
+ * return: true if validation succeeded
*/
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count);
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+
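/*
 * For an 8b/10b MST link the overhead returned by link_dpia_get_dp_overhead()
 * is one MTP time slot out of 64, i.e. ceil(link_bw_kbps / 64), using the same
 * link_rate * lane_count * 27000 kHz * 8 bandwidth formula as the .c file.
 * A worked sketch, assuming LINK_RATE_REF_FREQ_IN_KHZ is 27000 (the 0.27 GHz
 * DPCD link-rate unit used elsewhere in dc):
 */
static inline uint32_t mst_overhead_kbps_sketch(uint32_t dpcd_link_rate, uint32_t lane_count)
{
	uint32_t link_bw_kbps = dpcd_link_rate * lane_count * 27000 * 8;

	return link_bw_kbps / 64 + (link_bw_kbps % 64 ? 1 : 0);
}

/*
 * Example: HBR2 (dpcd_link_rate 0x14) x4 lanes gives 20 * 4 * 27000 * 8 =
 * 17280000 kbps of link bandwidth, so the MTPH overhead is
 * 17280000 / 64 = 270000 kbps.
 */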
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 48abeaa88678..4b01ab0a5a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -37,6 +37,7 @@
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
+#include "link_dp_dpia_bw.h"
#define DC_LOGGER \
link->ctx->logger
@@ -226,6 +227,12 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
bool allow_active;
+ link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+
+ /* Increment desync error counter if a desync error is detected */
+ if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+ link->replay_settings.replay_desync_error_fail_count++;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -281,6 +288,30 @@ void dp_handle_link_loss(struct dc_link *link)
}
}
+static void dp_handle_tunneling_irq(struct dc_link *link)
+{
+ enum dc_status retval;
+ uint8_t tunneling_status = 0;
+
+ retval = core_link_read_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &tunneling_status,
+ sizeof(tunneling_status));
+
+ if (retval == DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: Got DP tunneling status on link %d status=0x%x",
+ __func__, link->link_index, tunneling_status);
+
+ if (tunneling_status & DP_TUNNELING_BW_ALLOC_BITS_MASK)
+ link_dp_dpia_handle_bw_alloc_status(link, tunneling_status);
+ }
+
+ tunneling_status = DP_TUNNELING_IRQ;
+ core_link_write_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &tunneling_status, 1);
+}
+
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
@@ -314,13 +345,19 @@ enum dc_status dp_read_hpd_rx_irq_data(
*
* For DP 1.4 we need to read those from 2002h range.
*/
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) {
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT,
irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
+ DP_SINK_STATUS - DP_SINK_COUNT + 1);
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
+ retval = core_link_read_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &irq_data->bytes.link_service_irq_esi0.raw, 1);
+ }
+ } else {
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
@@ -341,6 +378,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.link_service_irq_esi0.raw = tmp[DP_LINK_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
/*
* This display doesn't have correct values in DPCD200Eh.
@@ -360,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
* Don't handle RX IRQ unless one of following is met:
* 1) The link is established (cur_link_settings != unknown)
* 2) We know we're dealing with a branch device, SST or MST
+ * 3) BW allocation mode is enabled on the link.
*/
if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
+ is_dp_branch_device(link) ||
+ link->dpia_bw_alloc_config.bw_alloc_enabled)
return true;
return false;
@@ -408,7 +448,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
link->skip_fallback_on_link_loss = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
@@ -482,6 +523,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
+ if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
+ dp_handle_tunneling_irq(link);
+ }
+
if (link->type == dc_connection_sst_branch &&
hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
!= link->dpcd_sink_count)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index ac33730fedd4..87516fb3b45a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
#define __DC_LINK_DP_IRQ_HANDLER_H__
-#include "link.h"
+#include "link_service.h"
bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index bafa52a0165a..49521ac4b0e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -75,7 +75,8 @@ void dp_disable_link_phy(struct dc_link *link,
struct dc *dc = link->ctx->dc;
if (!link->wa_flags.dp_keep_receiver_powered &&
- !link->skip_implict_edp_power_control)
+ !link->skip_implict_edp_power_control &&
+ link->type != dc_connection_none)
dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal);
@@ -104,7 +105,7 @@ void dp_set_hw_lane_settings(
// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
!is_immediate_downstream(link, offset) &&
- (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ (!((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
@@ -141,11 +142,12 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
* if the sink supports it and leave it enabled on link.
* If FEC is not supported, disable it.
*/
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
enum dc_status status = DC_OK;
uint8_t fec_config = 0;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc->funcs->fec_set_ready == NULL)
return DC_NOT_SUPPORTED;
@@ -163,8 +165,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
} else {
if (link->fec_state == dc_link_fec_ready) {
fec_config = 0;
- core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
- &fec_config, sizeof(fec_config));
+ if (link->type != dc_connection_none)
+ core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+ &fec_config, sizeof(fec_config));
link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
@@ -174,13 +177,14 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
return status;
}
-void dp_set_fec_enable(struct dc_link *link, bool enable)
+void dp_set_fec_enable(struct dc_link *link, const struct link_resource *link_res, bool enable)
{
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- if (link_enc->funcs->fec_set_enable == NULL)
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc == NULL || link_enc->funcs == NULL || link_enc->funcs->fec_set_enable == NULL)
return;
if (enable && dp_should_enable_fec(link)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index 1eb0619d6710..58e154494582 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_PHY_H__
#define __DC_LINK_DP_PHY_H__
-#include "link.h"
+#include "link_service.h"
void dp_enable_link_phy(
struct dc_link *link,
const struct link_resource *link_res,
@@ -52,7 +52,8 @@ void dp_set_drive_settings(
enum dc_status dp_set_fec_ready(struct dc_link *link,
const struct link_resource *link_res, bool ready);
-void dp_set_fec_enable(struct dc_link *link, bool enable);
+void dp_set_fec_enable(struct dc_link *link,
+ const struct link_resource *link_res, bool enable);
void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 754c895e1bfb..08e2b572e0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -736,10 +736,12 @@ void override_training_settings(
lt_settings->pre_emphasis = overrides->pre_emphasis;
if (overrides->post_cursor2 != NULL)
lt_settings->post_cursor2 = overrides->post_cursor2;
+ if (link->wa_flags.force_dp_ffe_preset && !dp_is_lttpr_present(link))
+ lt_settings->ffe_preset = &link->forced_dp_ffe_preset;
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
@@ -783,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
}
enum dc_dp_training_pattern decide_cr_training_pattern(
@@ -799,19 +800,23 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
}
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
struct encoder_feature_support *enc_caps;
struct dpcd_caps *rx_caps = &link->dpcd_caps;
enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- enc_caps = &link_enc->features;
-
switch (link_dp_get_encoding_format(link_settings)) {
case DP_8b_10b_ENCODING:
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!link_enc)
+ break;
+
+ enc_caps = &link_enc->features;
if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
@@ -884,13 +889,14 @@ void dp_decide_lane_settings(
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- decide_8b_10b_training_settings(link, link_settings, lt_settings);
+ decide_8b_10b_training_settings(link, link_res, link_settings, lt_settings);
else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING)
- decide_128b_132b_training_settings(link, link_settings, lt_settings);
+ decide_128b_132b_training_settings(link, link_res, link_settings, lt_settings);
}
@@ -1012,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
{
enum dc_status status;
uint8_t sink_status = 0;
- uint8_t i;
+ uint32_t i;
+ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? lttpr_count * 300 : 10);
+
+ // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL)
+ // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case
/* clear training pattern set */
status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
@@ -1022,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < intra_hop_disable_time_ms; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
@@ -1554,6 +1565,7 @@ enum link_training_result dp_perform_link_training(
/* decide training settings */
dp_decide_training_settings(
link,
+ link_res,
link_settings,
&lt_settings);
@@ -1567,14 +1579,15 @@ enum link_training_result dp_perform_link_training(
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
dpcd_configure_channel_coding(link, &lt_settings);
/* enter training mode:
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
@@ -1716,6 +1729,15 @@ bool perform_link_training_with_retries(
break;
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ !link->dc->config.enable_dpia_pre_training) {
+ if (j == (attempts - 1))
+ do_fallback = true;
+ else
+ do_fallback = false;
+ }
+
if (j == (attempts - 1)) {
DC_LOG_WARNING(
"%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
@@ -1780,13 +1802,10 @@ bool perform_link_training_with_retries(
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low) {
+ if (is_link_bw_low)
DC_LOG_WARNING(
"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
__func__, link->link_index, req_bw, link_bw);
-
- return false;
- }
}
msleep(delay_between_attempts);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 0b18aa35c33c..ce52de22ab7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_TRAINING_H__
#define __DC_LINK_DP_TRAINING_H__
-#include "link.h"
+#include "link_service.h"
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
@@ -104,6 +104,7 @@ void start_clock_recovery_pattern_early(struct dc_link *link,
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
@@ -117,6 +118,7 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings);
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings);
enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index db87cfe37b5c..11565f187ac7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -204,6 +204,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings legacy_settings;
decide_8b_10b_training_settings(link,
+ link_res,
&lt_settings->link_settings,
&legacy_settings);
return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
@@ -227,6 +228,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
}
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
@@ -238,7 +240,7 @@ void decide_128b_132b_training_settings(struct dc_link *link,
LINK_SPREAD_05_DOWNSPREAD_30KHZ;
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_settings);
lt_settings->eq_pattern_time = 2500;
lt_settings->eq_wait_time_limit = 400000;
lt_settings->eq_loop_count_limit = 20;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
index 2147f24efc8b..901a42edafa1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
@@ -34,6 +34,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings *lt_settings);
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 3bdce32a85e3..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -35,22 +35,41 @@
#define DC_LOGGER \
link->ctx->logger
+static void get_default_8b_10b_lttpr_aux_rd_interval(
+ union training_aux_rd_interval *training_rd_interval)
+{
+	/* LTTPRs are required to report 0x4 (16 ms) in DPCD 0000Eh when replying
+	 * to an AUX read of this register. Since old sinks with DPCD rev 1.1
+ * and earlier may not support this register, assume the mandatory
+ * value is programmed by the LTTPR to avoid AUX timeout issues.
+ */
+ training_rd_interval->raw = 0x4;
+}
+
static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
- const struct dc_link_settings *link_settings)
+ const struct dc_link_settings *link_settings,
+ enum lttpr_mode lttpr_mode)
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ else if (dp_is_lttpr_present(link))
+ get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);
+
+ if (training_rd_interval.raw != 0) {
+ if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
+ wait_in_micro_secs = 400;
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
}
return wait_in_micro_secs;
}
@@ -68,13 +87,15 @@ static uint32_t get_eq_training_aux_rd_interval(
DP_128B132B_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
- } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
+ } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ else if (dp_is_lttpr_present(link))
+ get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);
}
switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
@@ -90,7 +111,8 @@ static uint32_t get_eq_training_aux_rd_interval(
}
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings)
{
@@ -110,16 +132,24 @@ void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles
+ * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences
+ */
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
+ lt_settings->lttpr_early_tps2 = true;
+ }
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -151,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
+static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t lttpr_count)
+{
+ /* Vendor-specific LTTPR early TPS2 sequence:
+ * 1. Output TPS2
+ * 2. Wait 400us
+ * 3. Set link settings as usual
+ * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
+ * 5. Wait 1ms
+ * 6. Begin link training as usual
+	 */
+
+ uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
+
+ dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ udelay(400);
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
+
+ udelay(1000);
+}
+
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -361,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t repeater_cnt;
+ uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -369,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
+ if (lt_settings->lttpr_early_tps2)
+ set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
+ else
+ dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
- repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
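/*
 * The CR wait time above is decoded from DPCD 0000Eh (TRAINING_AUX_RD_INTERVAL):
 * a value of 0 keeps the 100 us default, while a non-zero value N means N * 4 ms.
 * That is why the LTTPR fallback assumes 0x4, which decodes to the 16 ms
 * interval LTTPRs are required to report. A sketch of the decode alone,
 * ignoring the 400 us override the real code applies outside
 * LTTPR_MODE_NON_TRANSPARENT:
 */
static inline uint32_t cr_aux_rd_interval_us_sketch(uint8_t interval_field)
{
	return interval_field ? interval_field * 4000u : 100u;
}

/* cr_aux_rd_interval_us_sketch(0x4) == 16000 us == 16 ms */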
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
index d26de15ce954..ea0de701d83f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
@@ -54,7 +54,8 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
index 4c6b886a9da8..f99d26290bc0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -39,6 +39,7 @@ bool dp_perform_link_training_skip_aux(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
&lt_settings);
override_training_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 39e4b7dc9588..603537ffd128 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -110,6 +110,7 @@ static enum link_training_result dpia_configure_link(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
lt_settings);
@@ -129,11 +130,14 @@ static enum link_training_result dpia_configure_link(
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
- if (link->preferred_training_settings.fec_enable != NULL)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
- status = dp_set_fec_ready(link, link_res, fec_enable);
+ if (link_dp_get_encoding_format(link_setting) == DP_8b_10b_ENCODING) {
+ if (link->preferred_training_settings.fec_enable != NULL)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+ status = dp_set_fec_ready(link, link_res, fec_enable);
+ }
+
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index ccf8096dde29..ce174ce5579c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -270,7 +270,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ // Only perform toggle if FIXED_VS LTTPR reports no IEEE OUI
+ if (memcmp("\x00\x00\x00", &link->dpcd_caps.lttpr_caps.lttpr_ieee_oui[0], 3) == 0) {
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index 08d787a1e451..c2717c678c72 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,7 +25,7 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e0e3bb865359..c56e69eb27ef 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds && !link->dpcd_caps.panel_luminance_control)
+ return true;
+
// use internal backlight control if dmub capabilities are not present
if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
!link->dc->caps.dmub_caps.aux_backlight_support) {
@@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
target_luminance = (struct target_luminance_value *)&backlight_millinits;
+ // Make sure we disable AMD ABC first.
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, sizeof(uint8_t));
+ if (backlight_enable) {
+ backlight_enable = 0;
+ core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, 1);
+ }
+
core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&backlight_enable, sizeof(uint8_t));
@@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
uint8_t backlight_control = isHDR ? 1 : 0;
+ uint8_t backlight_enable = 0;
+
// OLEDs have no PWM, they can only use AUX
if (link->dpcd_sink_ext_caps.bits.oled == 1)
backlight_control = 1;
+ // Make sure we disable VESA ABC first.
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(uint8_t));
+
+ if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) {
+ backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(backlight_enable));
+ }
+
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
@@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
sizeof(union dpcd_source_backlight_get)))
@@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return true;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
@@ -524,7 +552,7 @@ bool edp_set_backlight_level(const struct dc_link *link,
struct dc *dc = link->ctx->dc;
uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
uint32_t frame_ramp = backlight_level_params->frame_ramp;
- DC_LOGGER_INIT(link->ctx->logger);
+
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -675,6 +703,32 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ /* This is a workaround: some vendors require the source to
+ * read the PSR cap; otherwise, the vendor's PSR feature will
+ * fall back to its default behavior, causing a misconfiguration
+ * of this feature.
+ */
+ if (link->panel_config.psr.read_psrcap_again) {
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ }
+
+ //Clear PSR cfg
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_EN_CFG,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
dc = link->ctx->dc;
dmcu = dc->res_pool->dmcu;
psr = dc->res_pool->psr;
@@ -685,9 +739,6 @@ bool edp_setup_psr(struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
psr_configuration.bits.ENABLE = 1;
psr_configuration.bits.CRC_VERIFICATION = 1;
psr_configuration.bits.FRAME_CAPTURE_INDICATION =
@@ -833,6 +884,8 @@ bool edp_setup_psr(struct dc_link *link,
psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+ psr_context->os_request_force_ffu = psr_config->os_request_force_ffu;
+
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
@@ -896,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
if (replay != NULL && link->replay_settings.replay_feature_enabled &&
- replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
@@ -931,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
return true;
}
-bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ /* To-do: Setup Replay */
+ struct dc *dc;
+ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+ unsigned int lineTimeInNs = 0;
+
+ union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 };
+ union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 };
+
+ union dpcd_alpm_configuration alpm_config;
+
+ replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ //Clear Panel Replay enable & config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ dc = link->ctx->dc;
+
+ // TODO: confirm whether this replay block lookup is still needed
+ replay = dc->res_pool->replay;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
+ replay_context.digbe_inst = link->link_enc->transmitter;
+ replay_context.digfe_inst = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ replay_context.controllerId =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ lineTimeInNs =
+ ((stream->timing.h_total * 1000000) /
+ (stream->timing.pix_clk_100hz / 10)) + 1;
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+
+ if (link->replay_settings.replay_feature_enabled) {
+ pr_config_1.bits.PANEL_REPLAY_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1;
+ pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1;
+ pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1;
+ pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1;
+
+ pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0;
+ pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0;
+ pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0;
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ //ALPM Setup
+ memset(&alpm_config, 0, sizeof(alpm_config));
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_RECEIVER_ALPM_CONFIG,
+ &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+
+ return true;
+}
+
+static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
/* To-do: Setup Replay */
struct dc *dc;
@@ -950,6 +1113,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ //Clear Replay config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ link->replay_settings.config.replay_error_status.raw = 0;
+
dc = link->ctx->dc;
replay = dc->res_pool->replay;
@@ -982,6 +1155,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
+ replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu;
+
link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -995,7 +1170,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
memset(&alpm_config, 0, sizeof(alpm_config));
- alpm_config.bits.ENABLE = 1;
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 0;
+ }
+
dm_helpers_dp_write_dpcd(
link->ctx,
link,
@@ -1003,9 +1184,24 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
&alpm_config.raw,
sizeof(alpm_config.raw));
}
+
+ link->replay_settings.config.replay_video_conferencing_optimization_enabled = false;
+
return true;
}
+bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ if (!link)
+ return false;
+ if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY)
+ return edp_setup_panel_replay(link, stream);
+ else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY)
+ return edp_setup_freesync_replay(link, stream);
+ else
+ return false;
+}
+
/*
* This is general Interface for Replay to set an 32 bit variable to dmub
* replay_FW_Message_type: Indicates which instruction or variable pass to DMUB
@@ -1036,7 +1232,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1048,9 +1244,11 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
- replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
+ if (coasting_vtotal && (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number)) {
+ replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst, frame_skip_number);
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
}
return true;
@@ -1078,7 +1276,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1089,13 +1287,16 @@ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
/* Only both power and coasting vtotal changed, this func could return true */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
- coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ (coasting_vtotal &&
+ (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number))) {
if (link->replay_settings.replay_feature_enabled &&
replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
- *power_opts, panel_inst, coasting_vtotal);
+ *power_opts, panel_inst, coasting_vtotal, frame_skip_number);
link->replay_settings.replay_power_opt_active = *power_opts;
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
} else
return false;
} else
@@ -1111,11 +1312,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
struct abm *abm = NULL;
for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
+ abm = pipe_ctx->stream_res.abm;
break;
}
}
@@ -1151,6 +1352,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+bool is_smartmux_suported(struct dc_link *link)
+{
+ if (link->dc->caps.is_apu)
+ return false;
+ if (!link->dc->config.smart_mux_version)
+ return false;
+
+ return true;
+}
+
static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
struct link_resource *link_res, bool enable)
{
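For reference, the line-time formula used by both replay setup paths above, ((h_total * 1000000) / (pix_clk_100hz / 10)) + 1, can be checked with a small standalone sketch; the 1080p60 timing values below are illustrative assumptions, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 2200;           /* horizontal total in pixels (assumed) */
	unsigned int pix_clk_100hz = 1485000;  /* 148.5 MHz expressed in 100 Hz units (assumed) */
	unsigned int line_time_in_ns =
		((h_total * 1000000u) / (pix_clk_100hz / 10)) + 1;

	/* 2200 pixels / 148.5 MHz ~= 14814.8 ns: integer division gives 14814,
	 * and the +1 in the formula yields 14815.
	 */
	printf("line_time_in_ns = %u\n", line_time_in_ns);
	return 0;
}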
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index bcfa6ac5d4e7..dd79c7cd2828 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -25,11 +25,12 @@
#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
#define __DC_LINK_EDP_PANEL_CONTROL_H__
-#include "link.h"
+#include "link_service.h"
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool is_smartmux_suported(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
@@ -58,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 4fb526b264f9..af529328ba17 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_HPD_H__
#define __DC_LINK_HPD_H__
-#include "link.h"
+#include "link_service.h"
enum hpd_source_id get_hpd_line(struct dc_link *link);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
index eab196c57c6c..2d4b7a85847d 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
@@ -50,5 +50,5 @@ MMHUBBUB_DCN35 = dcn35_mmhubbub.o
AMD_DAL_MMHUBBUB_DCN35 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn35/,$(MMHUBBUB_DCN35))
AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN35)
-
endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..2a422e223bf2 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
@@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
- memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height);
memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index f2f55565e98a..b23c64004dd5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
@@ -142,22 +142,6 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
-bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
-{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- unsigned int top_sel;
- unsigned int opp_id;
- unsigned int idle;
-
- REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
- REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
- REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
- if (top_sel == 0xf && opp_id == 0xf && idle)
- return true;
- else
- return false;
-}
-
void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
index dbfffc6383dc..874e36e39e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
@@ -173,10 +173,6 @@ void mpc1_update_stereo_mix(
struct mpcc_sm_cfg *sm_cfg,
int mpcc_id);
-bool mpc1_is_mpcc_idle(
- struct mpc *mpc,
- int mpcc_id);
-
void mpc1_assert_mpcc_idle_before_connect(
struct mpc *mpc,
int mpcc_id);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 85298b8a1b5e..6bfd2c1294e5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state(
MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
}
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst, struct dcn_mpc_reg_state *mpc_reg_state)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]);
+ mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]);
+ mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]);
+ mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]);
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index 103f29900a2c..e2f147d17178 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
+
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
enum dc_lut_mode mpc3_get_ogam_current(
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..83bbbf34bcac 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -30,7 +30,6 @@
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
-#include "dcn401/dcn401_mpc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
@@ -370,275 +369,279 @@ void mpc32_program_shaper_luta_settings(
MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-}
-
-
-void mpc32_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id)
-{
- const struct gamma_curve *curve;
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
-
- curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
}
@@ -875,7 +878,7 @@ void mpc32_set3dlut_ram10(
}
-static void mpc32_set_3dlut_mode(
+void mpc32_set_3dlut_mode(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -1017,9 +1020,8 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
- .set_movable_cm_location = mpc401_set_movable_cm_location,
- .populate_lut = mpc401_populate_lut,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
index 9622518826c9..8c9b20bcca85 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
@@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram(
enum dc_lut_mode mode,
bool is_color_channel_12bits,
uint32_t mpcc_id);
+
+void mpc32_set_3dlut_mode(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17,
+ uint32_t mpcc_id);
+
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index 37ab5a4eefc7..eeac13fdd6f5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -40,23 +40,13 @@
#define FN(reg_name, field_name) \
mpc401->mpc_shift->field_name, mpc401->mpc_mask->field_name
-static void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx)
+void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-static void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
- MPCC_MCM_3DLUT_FL_DONE, done,
- MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
- MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
-}
-
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
@@ -297,14 +287,7 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I
}
}
-void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
-}
-
-static void program_gamut_remap(
+void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
const uint16_t *regval,
@@ -436,7 +419,7 @@ void mpc401_set_gamut_remap(
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
/* Bypass / Disable if type is bypass or hw */
- program_gamut_remap(mpc, mpcc_id, NULL,
+ mpc_program_gamut_remap(mpc, mpcc_id, NULL,
adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
} else {
struct fixed31_32 arr_matrix[12];
@@ -470,12 +453,12 @@ void mpc401_set_gamut_remap(
else
mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
- program_gamut_remap(mpc, mpcc_id, arr_reg_val,
+ mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val,
adjust->mpcc_gamut_remap_block_id, mode_select);
}
}
-static void read_gamut_remap(struct mpc *mpc,
+void mpc_read_gamut_remap(struct mpc *mpc,
int mpcc_id,
uint16_t *regval,
enum mpcc_gamut_remap_id gamut_remap_block_id,
@@ -571,9 +554,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
uint16_t arr_reg_val[12] = {0};
- uint32_t mode_select;
+ uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;
- read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
+ mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -615,14 +598,13 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
- .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .program_3dlut_size = mpc401_program_3dlut_size,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index af44054c2477..fdc42f8ab3ff 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -63,7 +63,7 @@
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_B[MAX_MPCC]; \
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
uint32_t MPCC_MCM_3DLUT_FAST_LOAD_SELECT[MAX_MPCC]; \
- uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC]
+ uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC];
#define MPC_COMMON_MASK_SH_LIST_DCN4_01(mask_sh) \
MPC_COMMON_MASK_SH_LIST_DCN32(mask_sh), \
@@ -183,7 +183,7 @@ struct dcn401_mpc_mask {
};
struct dcn401_mpc_registers {
- MPC_REG_VARIABLE_LIST_DCN4_01;
+ MPC_REG_VARIABLE_LIST_DCN4_01
};
struct dcn401_mpc {
@@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control(
bool lut_bank_a,
int mpcc_id);
-void mpc401_program_3dlut_size(
- struct mpc *mpc,
- bool is_17x17x17,
- int mpcc_id);
-
void mpc401_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
@@ -236,4 +231,27 @@ void mpc401_get_gamut_remap(
int mpcc_id,
struct mpc_grph_gamut_adjustment *adjust);
+void mpc401_update_3dlut_fast_load_select(
+ struct mpc *mpc,
+ int mpcc_id,
+ int hubp_idx);
+
+void mpc_program_gamut_remap(
+ struct mpc *mpc,
+ unsigned int mpcc_id,
+ const uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ enum mpcc_gamut_remap_mode_select mode_select);
+
+void mpc_read_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ uint32_t *mode_select);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..45d418636d0c 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
@@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
+
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
.dpg_is_pending = NULL,
- .opp_destroy = opp1_destroy
+ .opp_destroy = opp1_destroy,
+ .opp_read_reg_state = opp1_read_reg_state
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..38d0d530a9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
@@ -63,7 +63,8 @@
uint32_t OPPBUF_CONTROL1; \
uint32_t OPPBUF_3D_PARAMETERS_0; \
uint32_t OPPBUF_3D_PARAMETERS_1; \
- uint32_t OPP_PIPE_CONTROL
+ uint32_t OPP_PIPE_CONTROL; \
+ uint32_t OPP_PIPE_CRC_CONTROL
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -153,7 +154,6 @@ struct dcn10_opp {
const struct dcn10_opp_registers *regs;
const struct dcn10_opp_shift *opp_shift;
const struct dcn10_opp_mask *opp_mask;
-
bool is_write_to_ram_a_safe;
};
@@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
void opp1_destroy(struct output_pixel_processor **opp);
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..ce826a5be4c7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
@@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp
return 0;
}
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
.opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count,
+ .opp_read_reg_state = opp2_read_reg_state
};
void dcn20_opp_construct(struct dcn20_opp *oppn20,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..fb0c047c1788 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
@@ -59,7 +59,8 @@
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
uint32_t DPG_RAMP_CONTROL; \
- uint32_t DPG_STATUS
+ uint32_t DPG_STATUS; \
+ uint32_t DSCRM_DSC_FORWARD_CONFIG
#define OPP_DPG_MASK_SH_LIST(mask_sh) \
OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \
@@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel (
uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding, bool is_primary);
+
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
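
DCN2.0 extends the same snapshot with the DPG and DSC-forwarding registers. One plausible use is diffing two snapshots taken around a programming sequence; the helper below is a hedged sketch (its name and call sites are hypothetical, the fields are the ones filled in opp2_read_reg_state()).

static bool example_opp_state_changed(const struct dcn_opp_reg_state *before,
                const struct dcn_opp_reg_state *after)
{
        /* Flag any change in the registers captured by opp2_read_reg_state(). */
        return before->dpg_control != after->dpg_control ||
               before->fmt_control != after->fmt_control ||
               before->opp_pipe_control != after->opp_pipe_control ||
               before->opp_pipe_crc_control != after->opp_pipe_crc_control ||
               before->oppbuf_control != after->oppbuf_control ||
               before->dscrm_dsc_forward_config != after->dscrm_dsc_forward_config;
}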
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
index 3542b51c9aac..e11c4e16402f 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
@@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable)
{
REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable);
}
+
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
index a9a413527801..c6cace90e8f2 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
@@ -31,7 +31,8 @@
#define OPP_REG_VARIABLE_LIST_DCN3_5 \
OPP_REG_VARIABLE_LIST_DCN2_0; \
- uint32_t OPP_TOP_CLK_CONTROL
+ uint32_t OPP_TOP_CLK_CONTROL; \
+ uint32_t OPP_ABM_CONTROL
#define OPP_MASK_SH_LIST_DCN35(mask_sh) \
OPP_MASK_SH_LIST_DCN20(mask_sh), \
@@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20,
void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable);
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 097d06023e64..6f7b0f816f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -302,7 +302,6 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
-
if (optc->funcs->is_two_pixels_per_container(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
@@ -1313,7 +1312,7 @@ bool optc1_get_hw_timing(struct timing_generator *tg,
if (tg == NULL || hw_crtc_timing == NULL)
return false;
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ optc1_read_otg_state(tg, &s);
hw_crtc_timing->h_total = s.h_total + 1;
hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
@@ -1329,9 +1328,11 @@ bool optc1_get_hw_timing(struct timing_generator *tg,
}
-void optc1_read_otg_state(struct optc *optc1,
+void optc1_read_otg_state(struct timing_generator *optc,
struct dcn_otg_state *s)
{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
REG_GET(OTG_CONTROL,
OTG_MASTER_EN, &s->otg_enabled);
@@ -1471,37 +1472,71 @@ bool optc1_configure_crc(struct timing_generator *optc,
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
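
With the switch on params->crc_eng_inst, a caller can now steer the window and selection programming at either the OTG_CRC0_* or OTG_CRC1_* register set. Below is a hedged example of configuring a full-frame CRC on the second engine; the field names follow struct crc_params as used above, crc_eng_inst is the new selector, and the wrapper function itself is invented.

static bool example_enable_second_crc(struct timing_generator *tg,
                int h_active, int v_active)
{
        struct crc_params params = {0};

        params.enable = true;
        params.continuous_mode = true;
        params.crc_eng_inst = 1;        /* program OTG_CRC1_* instead of CRC0 */
        params.windowa_x_end = h_active;
        params.windowa_y_end = v_active;
        params.windowb_x_end = h_active;
        params.windowb_y_end = v_active;

        return tg->funcs->configure_crc(tg, &params);
}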
@@ -1510,6 +1545,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* optc1_get_crc - Capture CRC result per component
*
* @optc: timing_generator instance.
+ * @idx: index of crc engine to get CRC from
* @r_cr: 16-bit primary CRC signature for red data.
* @g_y: 16-bit primary CRC signature for green data.
* @b_cb: 16-bit primary CRC signature for blue data.
@@ -1521,7 +1557,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* If CRC is disabled, return false; otherwise, return true, and the CRC
* results in the parameters.
*/
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
@@ -1533,14 +1569,30 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
- /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
- REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ switch (idx) {
+ case 0:
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
- /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
- REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+ break;
+ case 1:
+ /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC1_DATA_RG,
+ CRC1_R_CR, r_cr,
+ CRC1_G_Y, g_y);
+
+ /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC1_DATA_B,
+ CRC1_B_CB, b_cb);
+ break;
+ default:
+ return false;
+ }
return true;
}
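
optc1_get_crc() now takes the engine index explicitly, so both result sets can be read back. A small sketch of polling both engines through the funcs table is shown below, assuming the timing_generator get_crc hook carries the same idx parameter as the implementation above; the logging site is illustrative.

static void example_read_both_crcs(struct timing_generator *tg)
{
        uint32_t r_cr, g_y, b_cb;
        uint8_t idx;

        for (idx = 0; idx < 2; idx++) {
                if (!tg->funcs->get_crc(tg, idx, &r_cr, &g_y, &b_cb))
                        continue;       /* engine disabled or index unsupported */

                DC_LOG_DEBUG("OTG CRC%u: R/Cr=0x%04x G/Y=0x%04x B/Cb=0x%04x",
                                idx, r_cr, g_y, b_cb);
        }
}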
@@ -1613,6 +1665,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.setup_manual_trigger = optc1_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 40757f20d73f..803bcc25601c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -86,6 +86,12 @@
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC1_DATA_B, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\
SR(GSL_SOURCE_SELECT),\
SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
@@ -98,111 +104,151 @@
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
+#define OPTC_REG_VARIABLE_LIST_DCN \
+ uint32_t OTG_GLOBAL_CONTROL1; \
+ uint32_t OTG_GLOBAL_CONTROL2; \
+ uint32_t OTG_VERT_SYNC_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_MODE; \
+ uint32_t OTG_GSL_CONTROL; \
+ uint32_t OTG_VSTARTUP_PARAM; \
+ uint32_t OTG_VUPDATE_PARAM; \
+ uint32_t OTG_VREADY_PARAM; \
+ uint32_t OTG_BLANK_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_LOCK; \
+ uint32_t OTG_GLOBAL_CONTROL0; \
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL; \
+ uint32_t OTG_H_TOTAL; \
+ uint32_t OTG_H_BLANK_START_END; \
+ uint32_t OTG_H_SYNC_A; \
+ uint32_t OTG_H_SYNC_A_CNTL; \
+ uint32_t OTG_H_TIMING_CNTL; \
+ uint32_t OTG_V_TOTAL; \
+ uint32_t OTG_V_BLANK_START_END; \
+ uint32_t OTG_V_SYNC_A; \
+ uint32_t OTG_V_SYNC_A_CNTL; \
+ uint32_t OTG_INTERLACE_CONTROL; \
+ uint32_t OTG_CONTROL; \
+ uint32_t OTG_STEREO_CONTROL; \
+ uint32_t OTG_3D_STRUCTURE_CONTROL; \
+ uint32_t OTG_STEREO_STATUS; \
+ uint32_t OTG_V_TOTAL_MAX; \
+ uint32_t OTG_V_TOTAL_MID; \
+ uint32_t OTG_V_TOTAL_MIN; \
+ uint32_t OTG_V_TOTAL_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL2; \
+ uint32_t OTG_TRIGA_CNTL; \
+ uint32_t OTG_TRIGA_MANUAL_TRIG; \
+ uint32_t OTG_MANUAL_FLOW_CONTROL; \
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL; \
+ uint32_t OTG_STATIC_SCREEN_CONTROL; \
+ uint32_t OTG_STATUS_FRAME_COUNT; \
+ uint32_t OTG_STATUS; \
+ uint32_t OTG_STATUS_POSITION; \
+ uint32_t OTG_NOM_VERT_POSITION; \
+ uint32_t OTG_BLACK_COLOR; \
+ uint32_t OTG_TEST_PATTERN_PARAMETERS; \
+ uint32_t OTG_TEST_PATTERN_CONTROL; \
+ uint32_t OTG_TEST_PATTERN_COLOR; \
+ uint32_t OTG_CLOCK_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; \
+ uint32_t OPTC_INPUT_CLOCK_CONTROL; \
+ uint32_t OPTC_DATA_SOURCE_SELECT; \
+ uint32_t OPTC_MEMORY_CONFIG; \
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL; \
+ uint32_t CONTROL; \
+ uint32_t OTG_GSL_WINDOW_X; \
+ uint32_t OTG_GSL_WINDOW_Y; \
+ uint32_t OTG_VUPDATE_KEEPOUT; \
+ uint32_t OTG_CRC_CNTL; \
+ uint32_t OTG_CRC_CNTL2; \
+ uint32_t OTG_CRC0_DATA_RG; \
+ uint32_t OTG_CRC0_DATA_B; \
+ uint32_t OTG_CRC1_DATA_B; \
+ uint32_t OTG_CRC2_DATA_B; \
+ uint32_t OTG_CRC3_DATA_B; \
+ uint32_t OTG_CRC1_DATA_RG; \
+ uint32_t OTG_CRC2_DATA_RG; \
+ uint32_t OTG_CRC3_DATA_RG; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL; \
+ uint32_t GSL_SOURCE_SELECT; \
+ uint32_t DWB_SOURCE_SELECT; \
+ uint32_t OTG_DSC_START_POSITION; \
+ uint32_t OPTC_DATA_FORMAT_CONTROL; \
+ uint32_t OPTC_BYTES_PER_PIXEL; \
+ uint32_t OPTC_WIDTH_CONTROL; \
+ uint32_t OTG_DRR_CONTROL; \
+ uint32_t OTG_BLANK_DATA_COLOR; \
+ uint32_t OTG_BLANK_DATA_COLOR_EXT; \
+ uint32_t OTG_DRR_TRIGGER_WINDOW; \
+ uint32_t OTG_M_CONST_DTO0; \
+ uint32_t OTG_M_CONST_DTO1; \
+ uint32_t OTG_DRR_V_TOTAL_CHANGE; \
+ uint32_t OTG_GLOBAL_CONTROL4; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OPTC_CLOCK_CONTROL; \
+ uint32_t OPTC_WIDTH_CONTROL2; \
+ uint32_t OTG_PSTATE_REGISTER; \
+ uint32_t OTG_PIPE_UPDATE_STATUS; \
+ uint32_t INTERRUPT_DEST; \
+ uint32_t OPTC_INPUT_SPARE_REGISTER; \
+ uint32_t OPTC_RSMU_UNDERFLOW; \
+ uint32_t OPTC_UNDERFLOW_THRESHOLD; \
+ uint32_t OTG_COUNT_CONTROL; \
+ uint32_t OTG_COUNT_RESET; \
+ uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
+ uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
+ uint32_t OTG_DLPC_CONTROL; \
+ uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_TIMING_INT_STATUS; \
+ uint32_t OTG_GLOBAL_CONTROL3; \
+ uint32_t OTG_GLOBAL_SYNC_STATUS; \
+ uint32_t OTG_GSL_VSYNC_GAP; \
+ uint32_t OTG_INTERLACE_STATUS; \
+ uint32_t OTG_INTERRUPT_CONTROL; \
+ uint32_t OTG_LONG_VBLANK_STATUS; \
+ uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \
+ uint32_t OTG_MASTER_EN; \
+ uint32_t OTG_PIXEL_DATA_READBACK0; \
+ uint32_t OTG_PIXEL_DATA_READBACK1; \
+ uint32_t OTG_REQUEST_CONTROL; \
+ uint32_t OTG_SNAPSHOT_CONTROL; \
+ uint32_t OTG_SNAPSHOT_FRAME; \
+ uint32_t OTG_SNAPSHOT_POSITION; \
+ uint32_t OTG_SNAPSHOT_STATUS; \
+ uint32_t OTG_SPARE_REGISTER; \
+ uint32_t OTG_STATUS_HV_COUNT; \
+ uint32_t OTG_STATUS_VF_COUNT; \
+ uint32_t OTG_STEREO_FORCE_NEXT_EYE; \
+ uint32_t OTG_TRIG_MANUAL_CONTROL; \
+ uint32_t OTG_TRIGB_CNTL; \
+ uint32_t OTG_TRIGB_MANUAL_TRIG; \
+ uint32_t OTG_UPDATE_LOCK; \
+ uint32_t OTG_V_TOTAL_INT_STATUS; \
+ uint32_t OTG_VSYNC_NOM_INT_STATUS
+
+
struct dcn_optc_registers {
- uint32_t OTG_GLOBAL_CONTROL1;
- uint32_t OTG_GLOBAL_CONTROL2;
- uint32_t OTG_VERT_SYNC_CONTROL;
- uint32_t OTG_MASTER_UPDATE_MODE;
- uint32_t OTG_GSL_CONTROL;
- uint32_t OTG_VSTARTUP_PARAM;
- uint32_t OTG_VUPDATE_PARAM;
- uint32_t OTG_VREADY_PARAM;
- uint32_t OTG_BLANK_CONTROL;
- uint32_t OTG_MASTER_UPDATE_LOCK;
- uint32_t OTG_GLOBAL_CONTROL0;
- uint32_t OTG_DOUBLE_BUFFER_CONTROL;
- uint32_t OTG_H_TOTAL;
- uint32_t OTG_H_BLANK_START_END;
- uint32_t OTG_H_SYNC_A;
- uint32_t OTG_H_SYNC_A_CNTL;
- uint32_t OTG_H_TIMING_CNTL;
- uint32_t OTG_V_TOTAL;
- uint32_t OTG_V_BLANK_START_END;
- uint32_t OTG_V_SYNC_A;
- uint32_t OTG_V_SYNC_A_CNTL;
- uint32_t OTG_INTERLACE_CONTROL;
- uint32_t OTG_CONTROL;
- uint32_t OTG_STEREO_CONTROL;
- uint32_t OTG_3D_STRUCTURE_CONTROL;
- uint32_t OTG_STEREO_STATUS;
- uint32_t OTG_V_TOTAL_MAX;
- uint32_t OTG_V_TOTAL_MID;
- uint32_t OTG_V_TOTAL_MIN;
- uint32_t OTG_V_TOTAL_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL2;
- uint32_t OTG_TRIGA_CNTL;
- uint32_t OTG_TRIGA_MANUAL_TRIG;
- uint32_t OTG_MANUAL_FLOW_CONTROL;
- uint32_t OTG_FORCE_COUNT_NOW_CNTL;
- uint32_t OTG_STATIC_SCREEN_CONTROL;
- uint32_t OTG_STATUS_FRAME_COUNT;
- uint32_t OTG_STATUS;
- uint32_t OTG_STATUS_POSITION;
- uint32_t OTG_NOM_VERT_POSITION;
- uint32_t OTG_BLACK_COLOR;
- uint32_t OTG_TEST_PATTERN_PARAMETERS;
- uint32_t OTG_TEST_PATTERN_CONTROL;
- uint32_t OTG_TEST_PATTERN_COLOR;
- uint32_t OTG_CLOCK_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT1_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
- uint32_t OPTC_INPUT_CLOCK_CONTROL;
- uint32_t OPTC_DATA_SOURCE_SELECT;
- uint32_t OPTC_MEMORY_CONFIG;
- uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t CONTROL;
- uint32_t OTG_GSL_WINDOW_X;
- uint32_t OTG_GSL_WINDOW_Y;
- uint32_t OTG_VUPDATE_KEEPOUT;
- uint32_t OTG_CRC_CNTL;
- uint32_t OTG_CRC_CNTL2;
- uint32_t OTG_CRC0_DATA_RG;
- uint32_t OTG_CRC0_DATA_B;
- uint32_t OTG_CRC1_DATA_B;
- uint32_t OTG_CRC2_DATA_B;
- uint32_t OTG_CRC3_DATA_B;
- uint32_t OTG_CRC1_DATA_RG;
- uint32_t OTG_CRC2_DATA_RG;
- uint32_t OTG_CRC3_DATA_RG;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL;
- uint32_t GSL_SOURCE_SELECT;
- uint32_t DWB_SOURCE_SELECT;
- uint32_t OTG_DSC_START_POSITION;
- uint32_t OPTC_DATA_FORMAT_CONTROL;
- uint32_t OPTC_BYTES_PER_PIXEL;
- uint32_t OPTC_WIDTH_CONTROL;
- uint32_t OTG_DRR_CONTROL;
- uint32_t OTG_BLANK_DATA_COLOR;
- uint32_t OTG_BLANK_DATA_COLOR_EXT;
- uint32_t OTG_DRR_TRIGGER_WINDOW;
- uint32_t OTG_M_CONST_DTO0;
- uint32_t OTG_M_CONST_DTO1;
- uint32_t OTG_DRR_V_TOTAL_CHANGE;
- uint32_t OTG_GLOBAL_CONTROL4;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OPTC_CLOCK_CONTROL;
- uint32_t OPTC_WIDTH_CONTROL2;
- uint32_t OTG_PSTATE_REGISTER;
- uint32_t OTG_PIPE_UPDATE_STATUS;
+ OPTC_REG_VARIABLE_LIST_DCN;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -315,6 +361,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC1_SELECT, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
@@ -327,6 +374,17 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
@@ -482,6 +540,7 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
type OTG_CRC_CONT_EN;\
type OTG_CRC0_SELECT;\
+ type OTG_CRC1_SELECT;\
type OTG_CRC_EN;\
type CRC0_R_CR;\
type CRC0_G_Y;\
@@ -572,6 +631,7 @@ struct dcn_optc_registers {
type OTG_DC_REG_UPDATE_PENDING;\
type OTG_CURSOR_UPDATE_PENDING;\
type OTG_VUPDATE_KEEPOUT_STATUS;\
+ type OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
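
The conversion of struct dcn_optc_registers into the OPTC_REG_VARIABLE_LIST_DCN macro follows the same composition pattern the OPP headers above already use (OPP_REG_VARIABLE_LIST_DCN3_5 extending DCN2_0): a base variable list that later generations can extend without re-listing every member. A generic, self-contained illustration of the pattern, with invented names rather than driver registers:

#define BASE_REG_VARIABLE_LIST \
        uint32_t CTRL; \
        uint32_t STATUS

#define GEN2_REG_VARIABLE_LIST \
        BASE_REG_VARIABLE_LIST; \
        uint32_t EXTRA_CTRL

/* The per-generation register struct expands the composed list. */
struct gen2_regs {
        GEN2_REG_VARIABLE_LIST;
};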
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index b4694985a40a..e7a90a437fff 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -502,7 +502,7 @@ void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *ref
REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
}
-static struct timing_generator_funcs dcn20_tg_funcs = {
+static const struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -562,6 +562,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.align_vblanks = optc2_align_vblanks,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index 49c2efdfa403..772a8bfb949c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
@@ -129,7 +129,7 @@ static void optc201_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
-static struct timing_generator_funcs dcn201_tg_funcs = {
+static const struct timing_generator_funcs dcn201_tg_funcs = {
.validate_timing = optc201_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -180,6 +180,7 @@ static struct timing_generator_funcs dcn201_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn201_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index 4c95c0958612..ee4665aa49e9 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -357,7 +357,7 @@ void optc3_tg_init(struct timing_generator *optc)
optc1_clear_optc_underflow(optc);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -420,6 +420,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn30_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index d7a45ef2d01b..38f85bc2681a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -109,7 +109,7 @@ void optc301_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -172,6 +172,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn301_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 4b6446ed4ce4..c6417538090f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -245,7 +245,207 @@ void optc3_init_odm(struct timing_generator *optc)
optc1->opp_count = 1;
}
-static struct timing_generator_funcs dcn31_tg_funcs = {
+void optc31_read_otg_state(struct timing_generator *optc,
+ struct dcn_otg_state *s)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &s->otg_enabled);
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &s->v_blank_start,
+ OTG_V_BLANK_END, &s->v_blank_end);
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
+
+ REG_GET(OTG_V_TOTAL,
+ OTG_V_TOTAL, &s->v_total);
+
+ REG_GET(OTG_V_TOTAL_MAX,
+ OTG_V_TOTAL_MAX, &s->v_total_max);
+
+ REG_GET(OTG_V_TOTAL_MIN,
+ OTG_V_TOTAL_MIN, &s->v_total_min);
+
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
+
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
+
+ REG_GET_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, &s->v_sync_a_start,
+ OTG_V_SYNC_A_END, &s->v_sync_a_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &s->h_blank_start,
+ OTG_H_BLANK_END, &s->h_blank_end);
+
+ REG_GET_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, &s->h_sync_a_start,
+ OTG_H_SYNC_A_END, &s->h_sync_a_end);
+
+ REG_GET(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
+
+ REG_GET(OTG_H_TOTAL,
+ OTG_H_TOTAL, &s->h_total);
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
+ OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+ OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
+
+ REG_GET(INTERRUPT_DEST,
+ OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, &s->vertical_interrupt2_dest);
+
+ s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+}
+
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL);
+ optc_reg_state->optc_data_format_control = REG_READ(OPTC_DATA_FORMAT_CONTROL);
+ optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT);
+ optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL);
+ optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL);
+ optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER);
+ optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG);
+ optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW);
+ optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD);
+ optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL);
+ optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL);
+ optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL);
+ optc_reg_state->otg_control = REG_READ(OTG_CONTROL);
+ optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL);
+ optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET);
+ optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL);
+ optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK);
+ optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK);
+ optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B);
+ optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG);
+ optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B);
+ optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG);
+ optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B);
+ optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG);
+ optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B);
+ optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
+ optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
+ optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
+ optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
+ optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
+ optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE);
+ optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION);
+ optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL);
+ optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0);
+ optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1);
+ optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2);
+ optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3);
+ optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4);
+ optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS);
+ optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL);
+ optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP);
+ optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X);
+ optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y);
+ optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END);
+ optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A);
+ optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL);
+ optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL);
+ optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL);
+ optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL);
+ optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS);
+ optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL);
+ optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS);
+ optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0);
+ optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1);
+ optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE);
+ optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN);
+ optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE);
+ optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION);
+ optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS);
+ optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0);
+ optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1);
+ optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL);
+ optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL);
+ optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME);
+ optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION);
+ optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS);
+ optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER);
+ optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL);
+ optc_reg_state->otg_status = REG_READ(OTG_STATUS);
+ optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT);
+ optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT);
+ optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION);
+ optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT);
+ optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL);
+ optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE);
+ optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS);
+ optc_reg_state->otg_trig_manual_control = REG_READ(OTG_TRIG_MANUAL_CONTROL);
+ optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL);
+ optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG);
+ optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL);
+ optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG);
+ optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK);
+ optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END);
+ optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL);
+ optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2);
+ optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A);
+ optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL);
+ optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL);
+ optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL);
+ optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS);
+ optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX);
+ optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID);
+ optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN);
+ optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION);
+ optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL);
+ optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION);
+ optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL);
+ optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION);
+ optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM);
+ optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM);
+ optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS);
+ optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT);
+ optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM);
+}
+
+static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -306,6 +506,8 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn31_timing_generator_init(struct optc *optc1)
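
dcn31 wires both new readback hooks into its funcs table. A hedged sketch of a caller that snapshots the OTG timing state through the hook: the struct dcn_otg_state fields match the ones filled in optc31_read_otg_state(), while the logging helper itself is hypothetical.

static void example_log_otg_state(struct timing_generator *tg)
{
        struct dcn_otg_state s = {0};

        if (!tg->funcs->read_otg_state)
                return;

        tg->funcs->read_otg_state(tg, &s);

        DC_LOG_DEBUG("OTG%d: enabled=%u h_total=%u v_total=%u vblank=[%u..%u]",
                        tg->inst, s.otg_enabled, s.h_total, s.v_total,
                        s.v_blank_start, s.v_blank_end);
}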
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index fbbe86d00c2e..98f7d2e299c5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -100,7 +100,8 @@
SRI(OTG_CRC_CNTL2, OTG, inst),\
SR(DWB_SOURCE_SELECT),\
SRI(OTG_DRR_CONTROL, OTG, inst),\
- SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst),\
+ SRI(INTERRUPT_DEST, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -260,6 +261,7 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn31_timing_generator_init(struct optc *optc1);
@@ -269,4 +271,9 @@ void optc31_set_drr(struct timing_generator *optc, const struct drr_params *para
void optc3_init_odm(struct timing_generator *optc);
+void optc31_read_otg_state(struct timing_generator *optc,
+ struct dcn_otg_state *s);
+
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 633d62addd4d..43ff957288b2 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -192,7 +192,7 @@ static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc,
}
-static struct timing_generator_funcs dcn314_tg_funcs = {
+static const struct timing_generator_funcs dcn314_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -255,6 +255,8 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_odm_combine = optc314_set_odm_combine,
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn314_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 0ff72b97b465..6bfdee3fcf5f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
@@ -99,7 +99,8 @@
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI(OTG_DRR_CONTROL, OTG, inst),\
- SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst),\
+ SRI(INTERRUPT_DEST, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -254,6 +255,7 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn314_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index c217f653b3c8..3dcb0d0c931c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,7 +297,7 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
-static struct timing_generator_funcs dcn32_tg_funcs = {
+static const struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -364,6 +364,8 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 0b0964a9da74..ead92ad78a23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -62,6 +62,7 @@
SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
@@ -181,7 +182,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index dfa9364fe5a6..f699e95059f3 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+
optc1_clear_optc_underflow(optc);
return true;
@@ -183,34 +185,87 @@ static bool optc35_configure_crc(struct timing_generator *optc,
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Cannot configure crc on a CRTC that is disabled */
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
if (!params->enable)
return true;
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
- if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) {
- REG_UPDATE_4(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1,
- OTG_CRC_WINDOW_DB_EN, 1);
- } else
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+
+ /* Program frame boundaries */
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
@@ -375,7 +430,22 @@ static void optc35_set_long_vtotal(
}
}
-static struct timing_generator_funcs dcn35_tg_funcs = {
+static void optc35_wait_otg_disable(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ uint32_t is_master_en;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en);
+ if (!is_master_en)
+ REG_WAIT(OTG_CLOCK_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+}
+
+static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -426,6 +496,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc35_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
@@ -439,6 +510,8 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.init_odm = optc3_init_odm,
.set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn35_timing_generator_init(struct optc *optc1)
@@ -453,6 +526,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
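
Both the reworked disable path and the new wait_otg_disable hook lean on REG_WAIT() to poll OTG_CURRENT_MASTER_EN_STATE until the hardware reports the master as actually off. As a rough model of what a REG_WAIT(reg, field, value, delay_us, tries) call does (an illustration, not the driver macro, and udelay() here stands in for the macro's internal delay):

static bool example_poll_field(uint32_t (*read_field)(void *ctx), void *ctx,
                uint32_t expected, unsigned int delay_us, unsigned int tries)
{
        unsigned int i;

        for (i = 0; i < tries; i++) {
                if (read_field(ctx) == expected)
                        return true;    /* field reached the expected value */

                udelay(delay_us);       /* wait between polls */
        }

        return false;                   /* timed out; REG_WAIT would warn here */
}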
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index be749ab41dce..733a2f149d9a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -71,7 +71,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 338a0cad23a5..a8e978d1fae8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -101,7 +101,7 @@ static uint32_t decide_odm_mem_bit_map(int *opp_id, int opp_cnt, int h_active)
return memory_bit_map;
}
-static void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
+void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
int opp_cnt, int segment_width, int last_segment_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -162,7 +162,7 @@ static void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
optc1->opp_count = opp_cnt;
}
-static void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -177,7 +177,7 @@ static void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc,
*
* Return: Always returns true
*/
-static bool optc401_enable_crtc(struct timing_generator *optc)
+bool optc401_enable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -203,7 +203,7 @@ static bool optc401_enable_crtc(struct timing_generator *optc)
}
/* disable_crtc */
-static bool optc401_disable_crtc(struct timing_generator *optc)
+bool optc401_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -226,6 +226,11 @@ static bool optc401_disable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
+ // wait until CRTC_CURRENT_MASTER_EN_STATE == 0
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 15000);
+
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
@@ -234,7 +239,7 @@ static bool optc401_disable_crtc(struct timing_generator *optc)
return true;
}
-static void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
+void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -245,7 +250,7 @@ static void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}
-static void optc401_disable_phantom_otg(struct timing_generator *optc)
+void optc401_disable_phantom_otg(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -259,7 +264,7 @@ static void optc401_disable_phantom_otg(struct timing_generator *optc)
REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
-static void optc401_set_odm_bypass(struct timing_generator *optc,
+void optc401_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -365,7 +370,7 @@ void optc401_set_drr(
}
}
-static void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
+void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -396,7 +401,7 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
}
}
-static void optc401_program_global_sync(
+void optc401_program_global_sync(
struct timing_generator *optc,
int vready_offset,
int vstartup_start,
@@ -430,7 +435,7 @@ static void optc401_program_global_sync(
REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
}
-static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
+void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(tg);
@@ -442,7 +447,7 @@ static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable
return;
}
-static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
+bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
{
struct optc *optc1 = DCN10TG_FROM_TG(tg);
uint32_t lock_status = 0;
@@ -459,7 +464,7 @@ static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool lo
return true;
}
-static struct timing_generator_funcs dcn401_tg_funcs = {
+static const struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -527,6 +532,8 @@ static struct timing_generator_funcs dcn401_tg_funcs = {
.get_pipe_update_pending = optc3_get_pipe_update_pending,
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
+ .read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index 1be89571986f..fa62737b5b1b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -163,7 +163,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
@@ -172,5 +173,24 @@ void optc401_set_drr(
const struct drr_params *params);
void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
void optc401_setup_manual_trigger(struct timing_generator *optc);
+void optc401_program_global_sync(
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ int pstate_keepout);
+bool optc401_enable_crtc(struct timing_generator *optc);
+bool optc401_disable_crtc(struct timing_generator *optc);
+void optc401_phantom_crtc_post_enable(struct timing_generator *optc);
+void optc401_disable_phantom_otg(struct timing_generator *optc);
+void optc401_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
+void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
+ int opp_cnt, int segment_width, int last_segment_width);
+void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
+void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest);
+bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked);
+void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable);
#endif /* __DC_OPTC_DCN401_H__ */
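Editor's note on the dcn401_optc hunks above: several optc401_* helpers lose their static qualifier and gain header declarations so other translation units can call them, while the dcn401_tg_funcs table that points at them becomes const. A minimal sketch of the same pattern, using hypothetical names that are not part of the patch:

    #include <stdbool.h>

    /* Hypothetical names -- the export pattern only, not the driver's code. */
    struct widget { int busy; };

    void widget_enable(struct widget *w)    { w->busy = 1; }   /* was static, now shared */
    bool widget_wait_idle(struct widget *w) { return w->busy == 0; }

    struct widget_funcs {
        void (*enable)(struct widget *w);
        bool (*wait_idle)(struct widget *w);
    };

    /* const: the function-pointer table can live in read-only memory and
     * cannot be patched at run time; the named helpers remain visible to
     * other files because they are no longer static. */
    static const struct widget_funcs widget_funcs = {
        .enable    = widget_enable,
        .wait_idle = widget_wait_idle,
    };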
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f2ba76c1e0c0..782316348941 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,6 +31,7 @@
#include <linux/kgdb.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
diff --git a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
index af21c0a27f86..72bd43f9bbe2 100644
--- a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
@@ -79,16 +79,12 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl = 0;
- bool block_enabled;
-
- /*need to enable dscclk regardless DSC_PG*/
- if (pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on)
- pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc(
- pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
+ bool block_enabled = false;
+ bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
+ pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
+ pg_cntl->ctx->dc->idle_optimizations_allowed;
- if (pg_cntl->ctx->dc->debug.ignore_pg ||
- pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
- pg_cntl->ctx->dc->idle_optimizations_allowed)
+ if (skip_pg && !power_on)
return;
block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst);
@@ -111,7 +107,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN16_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 1: /* DSC1 */
REG_UPDATE(DOMAIN17_PG_CONFIG,
@@ -119,7 +115,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN17_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 2: /* DSC2 */
REG_UPDATE(DOMAIN18_PG_CONFIG,
@@ -127,7 +123,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN18_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 3: /* DSC3 */
REG_UPDATE(DOMAIN19_PG_CONFIG,
@@ -135,7 +131,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN19_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
default:
BREAK_TO_DEBUGGER();
@@ -144,12 +140,6 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
if (dsc_inst < MAX_PIPES)
pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on;
-
- if (pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) {
- /*this is to disable dscclk*/
- pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc(
- pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
- }
}
static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst)
@@ -189,11 +179,12 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
bool block_enabled;
+ bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
+ pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
+ pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
+ pg_cntl->ctx->dc->idle_optimizations_allowed;
- if (pg_cntl->ctx->dc->debug.ignore_pg ||
- pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
- pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
- pg_cntl->ctx->dc->idle_optimizations_allowed)
+ if (skip_pg && !power_on)
return;
block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
@@ -213,22 +204,22 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
case 0:
/* DPP0 & HUBP0 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 1:
/* DPP1 & HUBP1 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 2:
/* DPP2 & HUBP2 */
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 3:
/* DPP3 & HUBP3 */
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
default:
BREAK_TO_DEBUGGER();
@@ -501,6 +492,36 @@ void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl)
pg_cntl->pg_res_enable[PG_DWB] = block_enabled;
}
+static void pg_cntl35_print_pg_status(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log)
+{
+ int i = 0;
+ bool block_enabled = false;
+
+ DC_LOG_DEBUG("%s: %s", debug_func, debug_log);
+
+ DC_LOG_DEBUG("PG_CNTL status:\n");
+
+ block_enabled = pg_cntl35_io_clk_status(pg_cntl);
+ DC_LOG_DEBUG("ONO0=%d (DCCG, DIO, DCIO)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_mem_status(pg_cntl);
+ DC_LOG_DEBUG("ONO1=%d (DCHUBBUB, DCHVM, DCHUBBUBMEM)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
+ DC_LOG_DEBUG("ONO2=%d (MPC, OPP, OPTC, DWB)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
+ DC_LOG_DEBUG("ONO3=%d (HPO)\n", block_enabled ? 1 : 0);
+
+ for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
+ block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
+ DC_LOG_DEBUG("ONO%d=%d (DCHUBP%d, DPP%d)\n", 4 + i * 2, block_enabled ? 1 : 0, i, i);
+
+ block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
+ DC_LOG_DEBUG("ONO%d=%d (DSC%d)\n", 5 + i * 2, block_enabled ? 1 : 0, i);
+ }
+}
+
static const struct pg_cntl_funcs pg_cntl35_funcs = {
.init_pg_status = pg_cntl35_init_pg_status,
.dsc_pg_control = pg_cntl35_dsc_pg_control,
@@ -511,7 +532,8 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
.opp_pg_control = pg_cntl35_opp_pg_control,
.optc_pg_control = pg_cntl35_optc_pg_control,
- .dwb_pg_control = pg_cntl35_dwb_pg_control
+ .dwb_pg_control = pg_cntl35_dwb_pg_control,
+ .print_pg_status = pg_cntl35_print_pg_status
};
struct pg_cntl *pg_cntl35_create(
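Editor's note on the pg_cntl35 hunks above: the debug/idle overrides are folded into a local skip_pg flag, and the early return now also requires !power_on, so power gating is bypassed only on the power-down path while a power-up request is always honoured. The REG_WAIT polls on DOMAIN_PGFSM_PWR_STATUS keep their 1 us interval but allow 10000 retries instead of 1000, stretching the maximum wait from roughly 1 ms to roughly 10 ms (assuming REG_WAIT's usual interval/retry-count argument order). A trivial sketch of the guard, not part of the patch:

    #include <stdbool.h>

    /* Returns true when the power-gate request should be ignored:
     * only when gating is disabled AND the request is a power-down. */
    static bool pg_request_skipped(bool skip_pg, bool power_on)
    {
        return skip_pg && !power_on;
    }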
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 09320344d8e9..5b42da8b79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+RESOURCE_DCE60 = dce60_resource.o
+
+AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60)
+endif
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+###############################################################################
+
RESOURCE_DCE100 = dce100_resource.o
AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
@@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR
AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
-###############################################################################
-
-RESOURCE_DCE80 = dce80_resource.o
-
-AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
@@ -198,6 +208,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
+RESOURCE_DCN36 = dcn36_resource.o
+
+AMD_DAL_RESOURCE_DCN36 = $(addprefix $(AMDDALPATH)/dc/resource/dcn36/,$(RESOURCE_DCN36))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN36)
+
+###############################################################################
+
RESOURCE_DCN401 = dcn401_resource.o
AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401))
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index e698543ec937..d40d91ec2035 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -29,6 +29,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
@@ -77,6 +78,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -224,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -367,6 +370,7 @@ static const struct dce_abm_mask abm_mask = {
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -374,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -401,8 +406,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -483,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id], &se_shift, &se_mask);
return &enc110->base;
@@ -623,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -836,17 +861,24 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce100_validate_bandwidth(
+enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
+ struct dc_stream_state *stream = NULL;
+ const uint32_t max_pix_clk_khz = max(dc->clk_mgr->clks.max_supported_dispclk_khz, 400000);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
+ stream = context->res_ctx.pipe_ctx[i].stream;
+ if (stream) {
at_least_one_pipe = true;
+
+ if (stream->timing.pix_clk_100hz >= max_pix_clk_khz * 10)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
if (at_least_one_pipe) {
@@ -854,11 +886,20 @@ static bool dce100_validate_bandwidth(
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
+ /* On DCE 6.0 and 6.4 the PLL0 is both the display engine clock and
+ * the DP clock, and shouldn't be turned off. Just select the display
+ * clock value from its low power mode.
+ */
+ if (dc->ctx->dce_version == DCE_VERSION_6_0 ||
+ dc->ctx->dce_version == DCE_VERSION_6_4)
+ context->bw_ctx.bw.dce.dispclk_khz = 352000;
+ else
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce100_validate_surface_sets(
@@ -881,7 +922,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-static enum dc_status dce100_validate_global(
+enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -935,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
int i;
int j = -1;
struct dc_link *link = stream->link;
+ enum engine_id preferred_engine = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(stream->signal))
+ preferred_engine = link->link_enc->analog_engine;
for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
@@ -943,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
+ if (pool->stream_enc[i]->id == preferred_engine)
return pool->stream_enc[i];
}
}
@@ -1069,13 +1113,14 @@ static bool dce100_resource_construct(
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
- dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.i2c_speed_in_khz_hdcp = 40;
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
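Editor's note on the dce100_validate_bandwidth() hunk above: pix_clk_100hz is expressed in units of 100 Hz while the clock manager reports dispclk in kHz, so the limit is scaled by 10 (1 kHz = 10 x 100 Hz) before the comparison, with 400000 kHz used as the floor when the reported dispclk is lower. A small standalone sketch of the same arithmetic (hypothetical helper, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* true if the stream's pixel clock fits under the display clock limit */
    static bool pix_clk_ok(uint32_t pix_clk_100hz, uint32_t max_dispclk_khz)
    {
        return pix_clk_100hz < (uint64_t)max_dispclk_khz * 10;  /* kHz -> 100 Hz units */
    }

    /* Example: a 600 MHz pixel clock is 6000000 in 100 Hz units; against a
     * 400000 kHz limit (4000000 in 100 Hz units) it is rejected. */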
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c560f5..dd150a4b4610 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
@@ -41,6 +41,15 @@ struct resource_pool *dce100_create_resource_pool(
enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dc_validate_mode validate_mode);
+
enum dc_status dce100_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 035c6cfdaee5..cd54382c0af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -82,6 +82,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = true,
};
@@ -960,10 +964,10 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce110_validate_bandwidth(
+static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
@@ -1031,7 +1035,7 @@ static bool dce110_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
@@ -1376,6 +1380,7 @@ static bool dce110_resource_construct(
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 480a50967385..3f0a6bc4dcc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -76,6 +76,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -883,10 +887,10 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
@@ -952,7 +956,7 @@ bool dce112_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status resource_map_phy_clock_resources(
@@ -1111,12 +1115,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
+ (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1152,12 +1156,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1247,6 +1251,7 @@ static bool dce112_resource_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f9b4..3efc4c55d2d2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -42,10 +42,10 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index c63c59623433..b1570b6b1af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -67,6 +67,7 @@
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
+#include "link_service.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -490,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create(
return dce_i2c_hw;
}
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX),
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX),
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
};
@@ -525,8 +527,11 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults = {
- .disable_clock_gate = true,
- .enable_legacy_fast_update = true,
+ .disable_clock_gate = true,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static struct clock_source *dce120_clock_source_create(
@@ -659,6 +664,12 @@ static void dce120_resource_destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
static void read_dce_straps(
@@ -983,12 +994,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
memory_type_multiplier = MEMORY_TYPE_HBM;
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1054,6 +1065,7 @@ static bool dce120_resource_construct(
struct dc *dc,
struct dce110_resource_pool *pool)
{
+ struct ddc_service_init_data ddc_init_data = {0};
unsigned int i;
int j;
struct dc_context *ctx = dc->ctx;
@@ -1081,6 +1093,7 @@ static bool dce120_resource_construct(
dc->caps.psp_setup_panel_mode = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
@@ -1257,6 +1270,15 @@ static bool dce120_resource_construct(
bw_calcs_data_update_from_pplib(dc);
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
+ }
+
return true;
irqs_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 889f314cac65..f0152933bee2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -34,6 +34,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce60/irq_service_dce60.h"
#include "dce110/dce110_timing_generator.h"
@@ -48,7 +49,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "dce60/dce60_hw_sequencer.h"
+#include "dce60/dce60_hwseq.h"
#include "dce100/dce100_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -79,6 +80,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -239,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(2),
link_regs(3),
link_regs(4),
- link_regs(5)
+ link_regs(5),
+ {0},
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -365,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -372,8 +377,9 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 6,
};
@@ -381,6 +387,7 @@ static const struct resource_caps res_cap_61 = {
.num_timing_generator = 4,
.num_audio = 6,
.num_stream_encoder = 6,
+ .num_analog_stream_encoder = 1,
.num_pll = 3,
.num_ddc = 6,
};
@@ -388,8 +395,9 @@ static const struct resource_caps res_cap_61 = {
static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 2,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 2,
};
@@ -403,13 +411,13 @@ static const struct dc_plane_cap plane_cap = {
},
.max_upscale_factor = {
- .argb8888 = 16000,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
- .argb8888 = 250,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
}
@@ -598,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -717,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -863,61 +889,6 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce60_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return true;
-}
-
-static bool dce60_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce60_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce60_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce60_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -931,10 +902,10 @@ static const struct resource_funcs dce60_res_pool_funcs = {
.destroy = dce60_destroy_resource_pool,
.link_enc_create = dce60_link_encoder_create,
.panel_cntl_create = dce60_panel_cntl_create,
- .validate_bandwidth = dce60_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce60_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
@@ -973,21 +944,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1339,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
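Editor's note summarising the dce60_construct()/dce64_construct() hunks above: on DCE 6.0/6.4 PLL0 doubles as the display engine clock, so it is reserved for DP and never handed out as a general pixel PLL; the pixel clock sources are always PLL1 and PLL2 and clk_src_count stays at 2 whether or not an external DP reference clock exists. A tiny sketch of the selection (hypothetical helper, not part of the patch):

    enum clk_id { CLK_EXTERNAL, CLK_PLL0, CLK_PLL1, CLK_PLL2 };

    /* PLL0 also feeds the display engine clock, so it serves DP only when
     * no external reference clock is available; pixel clocks never use it. */
    static enum clk_id dce6_pick_dp_clock(int has_external_dp_ref)
    {
        return has_external_dp_ref ? CLK_EXTERNAL : CLK_PLL0;
    }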
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
index 5d653a76b0b0..5d653a76b0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3d5113f010bb..8687104cabb7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -32,6 +32,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce80/irq_service_dce80.h"
#include "dce110/dce110_timing_generator.h"
@@ -77,6 +78,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -240,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -367,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -374,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -382,6 +387,7 @@ static const struct resource_caps res_cap = {
static const struct resource_caps res_cap_81 = {
.num_timing_generator = 4,
.num_audio = 7,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 7,
.num_pll = 3,
.num_ddc = 6,
@@ -390,6 +396,7 @@ static const struct resource_caps res_cap_81 = {
static const struct resource_caps res_cap_83 = {
.num_timing_generator = 2,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 2,
.num_ddc = 2,
@@ -417,8 +424,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dce_dmcu_registers dmcu_regs = {
@@ -604,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -723,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -869,61 +896,6 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce80_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return true;
-}
-
-static bool dce80_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce80_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce80_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -937,10 +909,10 @@ static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
.panel_cntl_create = dce80_panel_cntl_create,
- .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce80_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
@@ -973,6 +945,7 @@ static bool dce80_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
@@ -1374,6 +1347,7 @@ static bool dce83_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 770a380cc03d..f12367adf145 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_status.h"
#include "dm_services.h"
#include "dc.h"
@@ -555,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
@@ -1125,18 +1129,18 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
-static bool dcn10_validate_bandwidth(
+static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
DC_FP_START();
- voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode);
DC_FP_END();
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
@@ -1245,6 +1249,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id ==
+ link->dpia_preferred_eng_id)
+ return pool->stream_enc[i];
}
}
@@ -1258,6 +1266,11 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
return NULL;
}
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
+{
+ return pipe_ctx->pipe_dlg_param.vstartup_start;
+}
+
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1272,7 +1285,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1384,6 +1398,8 @@ static bool dcn10_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 0;
+ dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8147..7bc1be53e800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
@@ -51,6 +51,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
const struct resource_pool *pool,
struct dc_stream_state *stream);
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_RESOURCE_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 5c616b1f7bf7..6679c1a14f2f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -85,7 +85,7 @@
#include "vm_helper.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
void dcn20_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -733,7 +736,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
}
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
- struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_ATOMIC);
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL);
if (!mpc20)
return NULL;
@@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
@@ -1220,7 +1221,7 @@ static void get_pixel_clock_parameters(
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
@@ -1229,7 +1230,8 @@ static void get_pixel_clock_parameters(
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
@@ -1667,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
@@ -2006,7 +2009,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -2020,7 +2023,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -2123,22 +2126,22 @@ validate_out:
return out;
}
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
@@ -2229,7 +2232,8 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
@@ -2284,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
@@ -2470,6 +2474,7 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2734,6 +2739,8 @@ static bool dcn20_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2761,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
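Editor's note on a change that recurs throughout the dcn20 (and dcn201) hunks: the allocations switch from GFP_ATOMIC to GFP_KERNEL. Resource pool construction runs in ordinary process context during driver initialisation, where sleeping is allowed, so GFP_KERNEL lets the allocator block and reclaim memory instead of drawing on the small emergency reserves GFP_ATOMIC depends on. The shape of the call is otherwise unchanged, as this fragment mirroring the hunk shows:

    /* Same allocation as in the hunk above, with the sleeping flag;
     * GFP_ATOMIC would only be needed in atomic context (IRQ handler,
     * spinlock held), which does not apply here. */
    struct dcn20_dpp *dpp = kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
    if (!dpp)
        return NULL;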
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11a7f..e997d35a8b86 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
@@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 9f37f0097feb..055107843a70 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn201_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN201_DPP(*dpp));
@@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create(
uint32_t inst)
{
struct dcn201_dpp *dpp =
- kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
return NULL;
@@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn201_opp *opp =
- kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL);
if (!opp) {
return NULL;
@@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
{
- struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
- GFP_ATOMIC);
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL);
if (!mpc201)
return NULL;
@@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
- kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
struct dcn10_link_encoder *enc10;
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create(
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
- kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
@@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = {
static struct dce_hwseq *dcn201_hwseq_create(
struct dc_context *ctx)
{
- struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
@@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create(
uint32_t inst)
{
struct dcn201_hubp *hubp201 =
- kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL);
if (!hubp201)
return NULL;
@@ -1079,7 +1080,8 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn201_resource_construct(
@@ -1152,6 +1154,7 @@ static bool dcn201_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
/*a0 only, remove later*/
dc->work_arounds.no_connect_phy_config = true;
@@ -1284,6 +1287,8 @@ static bool dcn201_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
@@ -1300,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool(
struct dc *dc)
{
struct dcn201_resource_pool *pool =
- kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
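Every allocation in the dcn201 constructors above drops GFP_ATOMIC in favor of GFP_KERNEL. Resource-pool construction runs in process context during driver initialization, where sleeping is allowed, so there is no reason for atomic allocations, which cannot block, dip into reserves, and fail far more readily under memory pressure. The recurring pattern reduces to:

	struct dcn201_dpp *dpp = kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL);	/* was GFP_ATOMIC */

	if (!dpp)
		return NULL;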
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 021ba8ac5c8c..2060acd5ae09 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -769,7 +772,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -783,7 +786,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -923,22 +926,22 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_NOT_SUPPORTED;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
@@ -1378,6 +1381,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
.get_panel_config_defaults = dcn21_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn21_resource_construct(
@@ -1412,9 +1416,9 @@ static bool dcn21_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
@@ -1457,6 +1461,7 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1683,6 +1688,8 @@ static bool dcn21_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
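dcn21_validate_bandwidth(), like the other validate hooks converted in this series, now returns enum dc_status rather than bool, so an allocation failure (DC_FAIL_BANDWIDTH_VALIDATE) is distinguishable from a configuration the hardware cannot support (DC_NOT_SUPPORTED). A sketch of a caller under the new contract, going through the resource_funcs table; the call site itself is illustrative and not part of this patch:

	enum dc_status status;

	status = dc->res_pool->funcs->validate_bandwidth(dc, context,
							 DC_VALIDATE_MODE_AND_PROGRAMMING);
	if (status != DC_OK)
		return status;	/* propagate the specific failure reason */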
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c2f7..a017fd9854d1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
@@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* _DCN21_RESOURCE_H_ */
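Throughout these files the bool fast_validate parameter becomes enum dc_validate_mode. From the way the values are used in this patch, DC_VALIDATE_MODE_AND_PROGRAMMING takes the place of the old fast_validate == false full-validation path, while DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX cover the fast paths, so tests of the form if (fast_validate) become if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING). A sketch of the enum as implied by those uses; the exact declaration order and numeric values are assumptions:

enum dc_validate_mode {
	/* validate and compute full programming state (old fast_validate == false) */
	DC_VALIDATE_MODE_AND_PROGRAMMING,
	/* mode-support check only (old fast_validate == true) */
	DC_VALIDATE_MODE_ONLY,
	/* mode-support check that also selects a voltage/state index */
	DC_VALIDATE_MODE_AND_STATE_INDEX,
};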
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index bfd0eccbed28..d0ebb733e802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1319,13 +1322,13 @@ static struct clock_source *dcn30_clock_source_create(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1627,7 +1630,7 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only)
{
bool out = false;
@@ -1646,7 +1649,7 @@ noinline bool dcn30_internal_validate_bw(
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -1655,7 +1658,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate || !allow_self_refresh_only) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1669,7 +1672,7 @@ noinline bool dcn30_internal_validate_bw(
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
if (allow_self_refresh_only &&
- (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
* If mode is unsupported or there's still no p-state support
@@ -1678,7 +1681,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1865,7 +1868,7 @@ noinline bool dcn30_internal_validate_bw(
}
if (repopulate_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1891,8 +1894,6 @@ static int get_refresh_rate(struct dc_state *context)
/* check if refresh rate at least 120hz */
timing = &context->streams[0]->timing;
- if (timing == NULL)
- return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
@@ -2037,9 +2038,9 @@ void dcn30_calculate_wm_and_dlg(
DC_FP_END();
}
-bool dcn30_validate_bandwidth(struct dc *dc,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -2047,7 +2048,8 @@ bool dcn30_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
@@ -2056,7 +2058,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2067,7 +2069,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -2093,7 +2095,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -2193,7 +2195,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -2250,6 +2252,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
#define CTX ctx
@@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct(
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2586,6 +2590,8 @@ static bool dcn30_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
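The pipe-parameter arrays in the validate paths above are now allocated with kcalloc() instead of an open-coded kzalloc(count * size, ...). Both return zeroed memory, but kcalloc checks the count * size multiplication for overflow before allocating, which is the preferred kernel idiom for array allocations:

	display_e2e_pipe_params_st *pipes;

	pipes = kcalloc(dc->res_pool->pipe_count,
			sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
	if (!pipes)
		goto validate_fail;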
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b7368fd..2c967fe55712 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -56,15 +56,15 @@ unsigned int dcn30_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate);
+enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode);
bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_acquire_post_bldn_3dlut(
struct resource_context *res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index a9816affd312..3ad6a3d4858e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
/* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 167,
- .nv12 = 167,
- .fp16 = 167
+ .argb8888 = 358,
+ .nv12 = 358,
+ .fp16 = 358
},
64,
64
@@ -693,7 +693,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 4096,/*upto true 4k*/
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
@@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = false,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1400,7 +1403,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn301_update_bw_bounding_box,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn301_resource_construct(
@@ -1497,6 +1501,7 @@ static bool dcn301_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1705,6 +1710,8 @@ static bool dcn301_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
return true;
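dcn301 also tightens its scaler limits: max_downscale_factor rises from 167 to 358 while max_downscale_src_width drops from 7680 to 4096. Going by the existing "6:1 downscaling ratio: 1000/6 = 166.666" comment, the factor is expressed in thousandths of the output-to-input ratio, so the old and new values work out as below; this interpretation is inferred from that comment, which now no longer matches the value next to it:

/* factor in 1/1000 units of (dst / src):
 *   167 / 1000 ~= 1/6.0  ->  up to 6:1 downscaling, 8K-wide sources
 *   358 / 1000 ~= 1/2.8  ->  up to ~2.8:1 downscaling, 4K-wide sources
 */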
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 7baefc910a3d..c0d4a1dc94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -47,7 +47,8 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
+
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
@@ -97,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1151,6 +1155,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
@@ -1288,6 +1293,7 @@ static bool dcn302_resource_construct(
&is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1480,6 +1486,8 @@ static bool dcn302_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
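Each resource_construct in this series now also publishes dc->caps.max_odm_combine_factor: 2 on DCN2.01 and DCN2.1, 4 on the DCN3.x parts, presumably reflecting the widest ODM combine (2:1 versus 4:1) each family supports. No consumer of the cap is shown in this patch; an illustrative use would be:

	/* illustrative only: clamp a requested ODM slice count to the ASIC's limit */
	if (requested_odm_slices > dc->caps.max_odm_combine_factor)
		requested_odm_slices = dc->caps.max_odm_combine_factor;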
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 8a57d46ad15f..75e09c2c283e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -47,7 +47,7 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1096,6 +1099,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.update_bw_bounding_box = dcn303_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
@@ -1233,6 +1237,7 @@ static bool dcn303_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1413,6 +1418,8 @@ static bool dcn303_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 54ec3d8e920c..0d667b54ccf8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1616,14 +1619,14 @@ static bool is_dual_plane(enum surface_pixel_format format)
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.gpuvm = 1;
@@ -1641,7 +1644,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1649,7 +1652,7 @@ int dcn31_populate_dml_pipes_from_context(
bool upscaled = false;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1758,9 +1761,9 @@ dcn31_set_mcif_arb_params(struct dc *dc,
DC_FP_END();
}
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1768,7 +1771,8 @@ bool dcn31_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
@@ -1777,19 +1781,19 @@ bool dcn31_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1812,7 +1816,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
@@ -1849,6 +1853,9 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -1952,6 +1959,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
@@ -1971,6 +1981,7 @@ static bool dcn31_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2197,6 +2208,8 @@ static bool dcn31_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
@@ -2226,3 +2239,35 @@ struct resource_pool *dcn31_create_resource_pool(
kfree(pool);
return NULL;
}
+
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output)
+{
+ struct dc_state *state = link->dc->current_state;
+ int i;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]);
+
+ for (i = 0; i < pipe_count; i++) {
+ link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]);
+
+ // Setup audio
+ if (pipes[i].stream_res.audio != NULL)
+ build_audio_output(state, &pipes[i], &audio_output[i]);
+ }
+#else
+ /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
+ * but the above will not compile on architectures without an FPU.
+ */
+ DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__);
+ ASSERT(0);
+#endif
+
+ return DC_OK;
+}
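The new dcn31_update_dc_state_for_encoder_switch(), wired into the DCN3.1 family through the .update_dc_state_for_encoder_switch resource hook, recomputes the pixel-rate divider for every stream on the given link, rebuilds the pipe pixel-clock parameters, and regenerates audio output parameters for the affected pipes so the link can be retrained across a DP1<->DP2 encoder switch; on kernels without CONFIG_DRM_AMD_DC_FP it only warns and asserts, since the divider math needs the FPU. A hypothetical call site during retraining; the caller is assumed, only the hook is added here:

	if (link->dc->res_pool->funcs->update_dc_state_for_encoder_switch)
		link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
				&link_settings, pipe_count, pipes, audio_output);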
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 551ad912f7be..c32c85ef0ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -37,9 +37,9 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg(
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
@@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool(
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 2794473f2aff..3ccde75a4ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -924,8 +924,13 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
+ .disable_dsc_power_gate = true,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1667,12 +1672,12 @@ static struct clock_source *dcn31_clock_source_create(
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt;
DC_FP_START();
- pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
DC_FP_END();
return pipe_cnt;
@@ -1694,9 +1699,9 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1704,7 +1709,8 @@ bool dcn314_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
@@ -1714,19 +1720,19 @@ bool dcn314_validate_bandwidth(struct dc *dc,
DC_FP_START();
// do not support self refresh only
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1749,7 +1755,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -1778,6 +1784,9 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -1883,6 +1892,9 @@ static bool dcn314_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -1901,6 +1913,7 @@ static bool dcn314_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2112,6 +2125,8 @@ static bool dcn314_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp;
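DCN3.1 and DCN3.14 now report dc->caps.num_of_host_routers = 2 and dc->caps.num_of_dpias_per_host_router = 2, i.e. at most 2 * 2 = 4 DPIA (tunneled DP over USB4) endpoints per ASIC. A small illustrative helper showing how the two caps combine; it is not part of this patch:

static unsigned int dc_total_dpia_count(const struct dc *dc)
{
	/* 2 host routers * 2 DPIAs per router = 4 on DCN3.1/3.14 */
	return dc->caps.num_of_host_routers *
	       dc->caps.num_of_dpias_per_host_router;
}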
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe71018df..ac9bb7f097d5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -39,9 +39,9 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 4ee33eb3381d..4e962f522f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
.using_dml2 = false,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1664,7 +1668,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1674,7 +1678,7 @@ static int dcn315_populate_dml_pipes_from_context(
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1698,7 +1702,7 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ if (pixel_rate_crb) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
@@ -1755,28 +1759,26 @@ static int dcn315_populate_dml_pipes_from_context(
continue;
}
- if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
- bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
- || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
-
- if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
- pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
- (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
- if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
- /* Clamp to 2 pipe split max det segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
- pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
- }
- if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
- /* If we are splitting we must have an even number of segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
- pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
- }
- /* Convert segments into size for DML use */
- pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
-
- crb_idx++;
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+
+ crb_idx++;
pipe_cnt++;
}
}
@@ -1846,6 +1848,9 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn315_resource_construct(
@@ -1938,6 +1943,7 @@ static bool dcn315_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2141,6 +2147,8 @@ static bool dcn315_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp;
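In dcn315_populate_dml_pipes_from_context() the leftover CRB/DET segments are now handed out to every pipe entry in the loop rather than only to pipes with no top_pipe or prev_odm_pipe; that qualifier has effectively moved into the pixel_rate_crb branch earlier in the function. The distribution itself is unchanged: each of the crb_pipes entries gets an equal share of the segments above the reserved minimum, the remainder goes out one extra segment at a time in crb_idx order, the result is clamped to two pipes' worth, evened up when a split is required, and finally converted to kilobytes. A worked example, assuming MIN_RESERVED_DET_SEGS = 4 and DCN3_15_CRB_SEGMENT_SIZE_KB = 64; both constants are defined outside this patch, so the numbers are assumptions:

/* remaining_det_segs = 19, crb_pipes = 3, det_size_override starts at 0:
 *   distributable = 19 - 4 = 15
 *   base share    = 15 / 3 = 5, remainder = 15 % 3 = 0
 *   each pipe     : det_size_override += 5, then * 64 KB = 320 KB for DML
 */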
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 79eddbafe3c2..5a95dd54cb42 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1610,7 +1613,7 @@ static bool is_dual_plane(enum surface_pixel_format format)
static int dcn316_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1618,7 +1621,7 @@ static int dcn316_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1720,6 +1723,9 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn316_resource_construct(
@@ -1812,6 +1818,7 @@ static bool dcn316_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2007,6 +2014,8 @@ static bool dcn316_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 12d247a7ec45..b276fec3e479 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -24,6 +24,7 @@
*
*/
+#include "dc_types.h"
#include "dm_services.h"
#include "dc.h"
@@ -68,7 +69,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -91,7 +92,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -737,6 +738,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dp_plus_plus_wa = true,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
+ .disable_stutter_for_wm_program = true
+};
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = false,
};
@@ -1741,7 +1746,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
}
}
-static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
+static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1749,7 +1754,8 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
@@ -1765,7 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
goto validate_fail;
DC_FP_START();
- out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode);
DC_FP_END();
if (pipe_cnt == 0)
@@ -1776,7 +1782,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1805,25 +1811,62 @@ validate_out:
return out;
}
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- out = dml1_validate(dc, context, fast_validate);
- return out;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ else
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
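dcn32_validate_bandwidth() now wraps DML1/DML2 validation with sub-viewport cursor handling: it first clears any per-stream subvp cursor limit left over from a previous pass, runs validation, and, when full validation succeeds with subvp in use, re-checks every non-phantom stream that has an enabled cursor. If a cursor cannot be supported it flags the stream, records DC_FAIL_HW_CURSOR_SUPPORT, and validates one more time so the second pass can steer away from subvp for that stream. In outline:

/* 1. dc_state_set_stream_cursor_subvp_limit(stream, context, false) where allowed
 * 2. dml2_validate()/dml1_validate() -> DC_OK or DC_FAIL_BANDWIDTH_VALIDATE
 * 3. if subvp is in use: dc_stream_check_cursor_attributes() per cursor stream
 * 4. on failure: set the subvp cursor limit, status = DC_FAIL_HW_CURSOR_SUPPORT,
 *    then run step 2 once more with the limit in place
 */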
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1839,7 +1882,7 @@ int dcn32_populate_dml_pipes_from_context(
int num_subvp_none = 0;
int odm_slice_count;
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
/* For single display subvp, look for subvp main so if we have phantom
* pipe, we can set odm policy to match main pipe
@@ -1921,7 +1964,7 @@ int dcn32_populate_dml_pipes_from_context(
/* Only populate DML input with subvp info for full updates.
* This is just a workaround -- needs a proper fix.
*/
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
@@ -2022,25 +2065,31 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ bool limit_cur_to_buf;
+
+ limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) &&
+ !stream->hw_cursor_req;
+
+ return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size;
+}
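The new dcn32_get_max_hw_cursor_size() helper, exported through .get_max_hw_cursor_size for dcn32 and dcn321, returns the smaller buffered-cursor limit only when the stream is under a subvp cursor limit and has not explicitly requested a hardware cursor; otherwise the normal dc->caps.max_cursor_size applies. The max_buffered_cursor_size of 64 set later in dcn32_resource_construct() follows from a 16 KiB cursor buffer at 4 bytes per pixel, per the in-line comment there:

/* side = sqrt(16 * 1024 / 4) = sqrt(4096) = 64
 * check: 64 * 64 * 4 = 16384 bytes, exactly the 16 KiB buffer
 */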
+
static struct resource_funcs dcn32_res_pool_funcs = {
.destroy = dcn32_destroy_resource_pool,
.link_enc_create = dcn32_link_encoder_create,
@@ -2066,6 +2115,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2112,8 +2163,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2151,6 +2200,8 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ /* floor(sqrt(buf_size_bytes / bpp ) * bpp, fixed_req_size) / bpp = max_width */
+ dc->caps.max_buffered_cursor_size = 64; // floor(sqrt(16 * 1024 / 4) * 4, 256) / 4 = 64
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -2205,7 +2256,7 @@ static bool dcn32_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -2224,6 +2275,7 @@ static bool dcn32_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2246,6 +2298,7 @@ static bool dcn32_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2453,6 +2506,8 @@ static bool dcn32_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2467,7 +2522,6 @@ static bool dcn32_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2499,14 +2553,14 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
- DC_FP_END();
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
return true;
create_fail:
- DC_FP_END();
-
dcn32_resource_destruct(pool);
return false;
@@ -2804,7 +2858,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
- free_pipe->hblank_borrow = otg_master->hblank_borrow;
+ free_pipe->dsc_padding_params = otg_master->dsc_padding_params;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 86c6e5e8c42e..99f0432288b4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -98,14 +98,14 @@ void dcn32_add_phantom_pipes(struct dc *dc,
unsigned int pipe_cnt,
unsigned int index);
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
@@ -1055,7 +1059,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
- SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst), \
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* HUBP */
@@ -1136,7 +1141,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
- SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
@@ -1224,7 +1230,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SR(DCHUBBUB_ARB_MALL_CNTL), \
SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
- SR(SDPIF_REQUEST_RATE_LIMIT)
+ SR(SDPIF_REQUEST_RATE_LIMIT), \
+ SR(DCHUBBUB_SDPIF_CFG0)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 06b9479c8bd3..3466ca34c93f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -72,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_subvp_high_refresh = false,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_dc_mode_overwrite = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn321_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1580,21 +1583,15 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
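dcn32 and dcn321 stop staging DML2 options in the dc->dml2_tmp scratch copy on every bounding-box update. Instead, a second persistent configuration, dc->dml2_dc_power_options, is initialized once at construct time as a copy of dc->dml2_options with use_clock_dc_limits = true, and dml2_reinit() is simply handed whichever persistent copy matches the DML2 context being rebuilt:

	/* once, in *_resource_construct() */
	memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
	       sizeof(struct dml2_configuration_options));
	dc->dml2_dc_power_options.use_clock_dc_limits = true;

	/* on bounding-box updates */
	dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
	dml2_reinit(dc, &dc->dml2_dc_power_options,
		    &dc->current_state->bw_ctx.dml2_dc_power_source);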
@@ -1624,6 +1621,8 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1708,6 +1707,7 @@ static bool dcn321_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -1758,8 +1758,8 @@ static bool dcn321_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1777,6 +1777,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -1799,6 +1800,7 @@ static bool dcn321_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2001,6 +2003,8 @@ static bool dcn321_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2015,7 +2019,6 @@ static bool dcn321_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2043,6 +2046,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 89e2adcf2a28..ef69898d2cc5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -33,7 +33,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn35_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -61,7 +61,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1732,26 +1735,47 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
- return out;
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ plane_state->tiling_info.gfxversion = DcGfxVersion9;
+ dcn20_patch_unknown_plane_state(plane_state);
+ return DC_OK;
+}
+
+
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
@@ -1763,7 +1787,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1775,10 +1799,13 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn35_resource_construct(
@@ -1831,9 +1858,9 @@ static bool dcn35_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
@@ -1866,7 +1893,7 @@ static bool dcn35_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1885,6 +1912,10 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
@@ -1895,12 +1926,13 @@ static bool dcn35_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+
dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
@@ -1917,6 +1949,7 @@ static bool dcn35_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2139,12 +2172,13 @@ static bool dcn35_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index f97bb4cb3761..9c56ae76e0c7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -35,6 +35,7 @@
extern struct _vcs_dpi_ip_params_st dcn3_5_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc;
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state);
struct dcn35_resource_pool {
struct resource_pool base;
@@ -304,7 +305,8 @@ struct resource_pool *dcn35_create_resource_pool(
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI_ARR(OTG_DRR_CONTROL, OTG, inst),\
- SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst)
+ SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst),\
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* DPP */
#define DPP_REG_LIST_DCN35_RI(id)\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 263a37c1cd3a..f3c614c4490c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -83,7 +83,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1712,24 +1715,39 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn351_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
- return out;
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+}
+
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+
}
static struct resource_funcs dcn351_res_pool_funcs = {
@@ -1742,7 +1760,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.validate_bandwidth = dcn351_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1754,10 +1772,13 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn351_resource_construct(
@@ -1810,9 +1831,9 @@ static bool dcn351_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
@@ -1845,7 +1866,7 @@ static bool dcn351_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1864,6 +1885,10 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
@@ -1876,6 +1901,7 @@ static bool dcn351_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+
/* Use psp mailbox to enable assr */
dc->config.use_assr_psp_message = true;
@@ -1894,6 +1920,7 @@ static bool dcn351_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2117,13 +2144,14 @@ static bool dcn351_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
new file mode 100644
index 000000000000..6469d5fe2e6d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -0,0 +1,2196 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+#include "dcn35/dcn35_init.h"
+#include "dcn36/dcn36_resource.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn36_resource.h"
+#include "dml2_0/dml2_wrapper.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn35/dcn35_hubbub.h"
+#include "dcn32/dcn32_mpc.h"
+#include "dcn35/dcn35_hubp.h"
+#include "irq/dcn36/irq_service_dcn36.h"
+#include "dcn35/dcn35_dpp.h"
+#include "dcn35/dcn35_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn35/dcn35_opp.h"
+#include "dcn35/dcn35_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn32/dcn32_hpo_dp_link_encoder.h"
+#include "link_service.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn32/dcn32_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn35/dcn35_dccg.h"
+#include "dcn35/dcn35_pg_cntl.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31/dcn31_panel_cntl.h"
+#include "dcn35/dcn35_hwseq.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
+#include "dml/dcn31/dcn31_fpu.h" /*todo*/
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dcn35/dcn35_dwb.h"
+#include "dcn35/dcn35_mmhubbub.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#define regBIF_BX2_BIOS_SCRATCH_2 0x2ffc004e
+#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 5
+
+#define regBIF_BX2_BIOS_SCRATCH_3 0x2ffc004f
+#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 5
+
+#define regBIF_BX2_BIOS_SCRATCH_6 0x2ffc0052
+#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 5
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "dc_state_priv.h"
+
+#include "link_enc_cfg.h"
+#define DC_LOGGER_INIT(logger)
+
+enum dcn36_clk_src_array_id {
+ DCN36_CLK_SRC_PLL0,
+ DCN36_CLK_SRC_PLL1,
+ DCN36_CLK_SRC_PLL2,
+ DCN36_CLK_SRC_PLL3,
+ DCN36_CLK_SRC_PLL4,
+ DCN36_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header files
+ */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
+
+#define SRI(reg_name, block, id)\
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
+
+static struct bios_registers bios_regs;
+
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK)
+};
+
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
+
+static struct dce_abm_registers abm_regs[4];
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
+
+static struct dce_audio_registers audio_regs[7];
+
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs_init(id)\
+ VPG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_vpg_registers vpg_regs[10];
+
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs_init(id)\
+ AFMT_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_afmt_registers afmt_regs[6];
+
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_apg_registers apg_regs[4];
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs_init(id)\
+ SE_DCN35_REG_LIST_RI(id)
+
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
+
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
+
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN35_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
+
+static struct dcn10_link_enc_registers link_enc_regs[5];
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \
+ //DPCS_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \
+ //DPCS_DCN31_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn3_dpp_registers dpp_regs[4];
+
+static const struct dcn35_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn35_opp_registers opp_regs[4];
+
+static const struct dcn35_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \
+ )
+
+static struct dce110_aux_registers aux_engine_regs[5];
+
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dcn30_dwbc_registers dwbc35_regs[1];
+
+static const struct dcn35_dwbc_shift dwbc35_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dwbc_mask dwbc35_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn35_mmhubbub_registers mcif_wb35_regs[1];
+
+static const struct dcn35_mmhubbub_shift mcif_wb35_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn35_mmhubbub_mask mcif_wb35_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define dsc_regsDCN35_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
+
+static struct dcn20_dsc_registers dsc_regs[4];
+
+static const struct dcn35_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN30_RI(id)
+
+static struct dcn_hubp2_registers hubp_regs[4];
+
+
+static const struct dcn35_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dcn_hubbub_registers hubbub_reg;
+
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN35(0)
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN35()
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct pg_cntl_registers pg_cntl_regs;
+
+#define pg_cntl_dcn35_regs_init() \
+ PG_CNTL_REG_LIST_DCN35()
+
+static const struct pg_cntl_shift pg_cntl_shift = {
+ PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct pg_cntl_mask pg_cntl_mask = {
+ PG_CNTL_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define SRII2(reg_name_pre, reg_name_post, id)\
+ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
+ ## id ## _ ## reg_name_post ## _BASE_IDX) + \
+ reg ## reg_name_pre ## id ## _ ## reg_name_post
+
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN36_REG_LIST()
+
+#define HWSEQ_DCN36_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN36_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN36_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
+
+static struct dcn_vmid_registers vmid_regs[16];
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn36 = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
+ .num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 2,
+ .num_dsc = 4,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true,
+ .ayuv = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ // 6:1 downscaling ratio: 1000/6 = 166.666
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 167,
+ .fp16 = 167
+ },
+ 64,
+ 64
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /*should the same as above two*/
+ .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
+ .disable_clock_gate = false,
+ .disable_dsc_power_gate = true,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 4096,/*upto true 4k*/
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable,
+ .dmub_command_table = true,
+ .pstate_enabled = true,
+ .use_max_lb = true,
+ .enable_mem_low_power = {
+ .bits = {
+ .vga = false,
+ .i2c = true,
+ .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
+ }
+ },
+ .root_clock_optimization = {
+ .bits = {
+ .dpp = true,
+ .dsc = true,/*dscclk and dsc pg*/
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = false,
+ .dpiasymclk = true,
+ }
+ },
+ .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
+ .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */
+ .using_dml2 = true,
+ .support_eDP1_5 = true,
+ .enable_hpo_pg_support = false,
+ .enable_single_display_2to1_odm_policy = true,
+ .disable_idle_power_optimizations = false,
+ .dmcub_emulation = false,
+ .disable_boot_optimizations = false,
+ .disable_unbounded_requesting = false,
+ .disable_mem_low_power = false,
+ //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
+ .enable_double_buffered_dsc_pg_support = true,
+ .enable_dp_dig_pixel_rate_div_policy = 1,
+ .disable_z10 = false,
+ .ignore_pg = true,
+ .psp_disabled_wa = true,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = false,
+ .static_screen_wait_frames = 2,
+ .disable_timeout = true,
+ .min_disp_clk_khz = 50000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+ .psr = {
+ .disable_psr = false,
+ .disallow_psrsu = false,
+ .disallow_replay = false,
+ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
+static void dcn35_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+ bool success = (dpp != NULL);
+
+ if (!success)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
+ success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift,
+ &tf_mask);
+ if (success) {
+ dpp35_set_fgcg(
+ dpp,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp);
+ return &dpp->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct output_pixel_processor *dcn35_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
+ dcn35_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+
+ dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp);
+
+ return &opp->base;
+}
+
+static struct dce_aux *dcn31_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+/* ========================================================== */
+
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ * 0 | C | 0
+ * 1 | First Available | 0
+ * 2 | D | 1
+ * 3 | First Available | 1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
+static enum engine_id dcn36_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
+static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+static struct mpc *dcn35_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
+ dcn32_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
+ hubbub35_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask,
+ 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/
+ 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/
+ 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/);
+
+
+ for (i = 0; i < res_cap_dcn36.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+static struct timing_generator *dcn35_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn35_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn35_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
+ dcn35_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+/* Create a minimal link encoder object not associated with a particular
+ * physical connector.
+ * resource_funcs.link_enc_create_minimal
+ */
+static struct link_encoder *dcn31_link_enc_create_minimal(
+ struct dc_context *ctx, enum engine_id eng_id)
+{
+ struct dcn20_link_encoder *enc20;
+
+ if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
+ return NULL;
+
+ enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct_minimal(
+ enc20,
+ ctx,
+ &link_enc_feature,
+ &link_enc_regs[eng_id - ENGINE_ID_DIGA],
+ eng_id);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dcn31_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dcn31_panel_cntl_construct(panel_cntl, init_data);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn31_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+ audio_regs_init(5);
+ audio_regs_init(6);
+
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn31_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
+
+ if (!vpg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
+ vpg31_construct(vpg31, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg31->base;
+}
+
+static struct afmt *dcn31_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
+
+ if (!afmt31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
+ afmt31_construct(afmt31, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
+}
+
+static struct stream_encoder *dcn35_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ afmt = dcn31_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
+ dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn36_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn31_create_audio,
+ .create_stream_encoder = dcn35_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
+ .create_hwseq = dcn36_hwseq_create,
+};
+
+static void dcn36_resource_destruct(struct dcn36_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn35_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
+ if (pool->base.pg_cntl != NULL)
+ dcn_pg_cntl_destroy(&pool->base.pg_cntl);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn35_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
+ if (hubp35_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx)
+{
+ dcn35_dwbc_set_fgcg(
+ dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb);
+}
+
+static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dwbc35_regs
+ dwbc_regs_dcn3_init(0);
+
+ dcn35_dwbc_construct(dwbc30, ctx,
+ &dwbc35_regs[i],
+ &dwbc35_shift,
+ &dwbc35_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+
+ dcn35_dwbc_init(dwbc30, ctx);
+ }
+ return true;
+}
+
+static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx)
+{
+ dcn35_mmhubbub_set_fgcg(
+ mcif_wb30,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub);
+}
+
+static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb35_regs
+ mcif_wb_regs_dcn3_init(0);
+
+ dcn35_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb35_regs[i],
+ &mcif_wb35_shift,
+ &mcif_wb35_mask,
+ i);
+
+ dcn35_mmhubbub_init(mcif_wb30, ctx);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn35_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN35_init(0),
+ dsc_regsDCN35_init(1),
+ dsc_regsDCN35_init(2),
+ dsc_regsDCN35_init(3);
+
+ dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ dsc35_set_fgcg(dsc,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc);
+ return &dsc->base;
+}
+
+static void dcn36_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn36_resource_pool *dcn36_pool = TO_DCN36_RES_POOL(*pool);
+
+ dcn36_resource_destruct(dcn36_pool);
+ kfree(dcn36_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn35_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ enum dc_validate_mode validate_mode)
+{
+ bool out = false;
+
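+	/* use the DC-power-limited DML2 instance when the current power source is DC */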
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ validate_mode);
+
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ DC_FP_START();
+ dcn35_decide_zstate_support(dc, context);
+ DC_FP_END();
+
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+}
+
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
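+	/* dcn35_populate_dml_pipes_from_context_fpu uses floating point, so it runs inside DC_FP_START()/DC_FP_END() */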
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
+static struct resource_funcs dcn36_res_pool_funcs = {
+ .destroy = dcn36_destroy_resource_pool,
+ .link_enc_create = dcn35_link_encoder_create,
+ .link_enc_create_minimal = dcn31_link_enc_create_minimal,
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+ .validate_bandwidth = dcn35_validate_bandwidth,
+ .calculate_wm_and_dlg = NULL,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .release_pipe = dcn20_release_pipe,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
+};
+
+static bool dcn36_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn36_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
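+	/*
+	 * The *_regs_init() helpers below expand against whichever array REG_STRUCT
+	 * currently names, so REG_STRUCT is redefined before each register list is
+	 * initialized.
+	 */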
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn36;
+
+ pool->base.funcs = &dcn36_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ if (dc->config.forceHBR2CP2520)
+ dc->caps.force_dp_tps4_for_cp2520 = false;
+ dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
+
+ dc->caps.edp_dsc_support = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
+ dc->caps.seamless_odm = true;
+
+ dc->caps.zstate_support = true;
+ dc->caps.ips_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+ // no OGAM ROM on DCN301
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
+ /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+ * to provide some margin.
+ * Future ASICs are expected to have an equal or higher value, in order to
+ * provide a deterministic power improvement from generation to generation.
+ * (i.e., we should not expect new ASIC generation with lower vmin rate)
+ */
+ dc->caps.max_disp_clock_khz_at_vmin = 650000;
+
+ /* Sequential ONO is based on ASIC. */
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
+ dc->caps.sequential_ono = true;
+
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
+ dc->config.disable_hbr_audio_dp2 = true;
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+ dc->check_config = config_defaults;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+ /* HW default is to have all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL0] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL1] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL2] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL3] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL4] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+
+ pool->base.clk_src_count = DCN36_CLK_SRC_TOTAL;
+
+ /* todo: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+ /* temporary until dml2 fully works without dml1 */
+ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31);
+
+ /* TODO: DCCG */
+ pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT pg_cntl_regs
+ pg_cntl_dcn35_regs_init();
+
+ pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask);
+ if (pool->base.pg_cntl == NULL) {
+ dm_error("DC: failed to create power gate control!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* TODO: IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn36_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn35_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn35_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn35_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn35_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn35_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* MPC and DSC */
+ pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn35_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn35_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn35_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ /* DCN3.5 has 6 DPIA */
+ pool->base.usb4_dpia_count = 4;
+ if (dc->debug.dpia_debug.bits.disable_dpia)
+ pool->base.usb4_dpia_count = 0;
+
+ /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn35_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.max_odm_combine_factor = 4;
+
+ dc->cap_funcs = cap_funcs;
+
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
+ dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
+ if (dc->config.EnableMinDispClkODM)
+ dc->dml2_options.minimize_dispclk_using_odm = true;
+ dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
+
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
+ dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
+
+ dc->dml2_options.max_segments_per_hubp = 24;
+ dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
+
+ if (dc->config.sdpif_request_limit_words_per_umc == 0)
+ dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
+
+ return true;
+
+create_fail:
+
+ dcn36_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn36_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn36_resource_pool *pool =
+ kzalloc(sizeof(struct dcn36_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn36_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h
new file mode 100644
index 000000000000..5490c9975e23
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef _DCN36_RESOURCE_H_
+#define _DCN36_RESOURCE_H_
+
+#include "core_types.h"
+
+extern struct _vcs_dpi_ip_params_st dcn3_6_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_6_soc;
+
+#define TO_DCN36_RES_POOL(pool)\
+ container_of(pool, struct dcn36_resource_pool, base)
+
+struct dcn36_resource_pool {
+ struct resource_pool base;
+};
+
+struct resource_pool *dcn36_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#define HWSEQ_DCN36_REG_LIST()\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(ODM_MEM_PWR_CTRL3), \
+ SR(MMHUBBUB_MEM_PWR_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCCG_GATE_DISABLE_CNTL4), \
+ SR(DCCG_GATE_DISABLE_CNTL5), \
+ SR(DCFCLK_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(MPC_CRC_CTRL), \
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN16_PG_CONFIG), \
+ SR(DOMAIN17_PG_CONFIG), \
+ SR(DOMAIN18_PG_CONFIG), \
+ SR(DOMAIN19_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN16_PG_STATUS), \
+ SR(DOMAIN17_PG_STATUS), \
+ SR(DOMAIN18_PG_STATUS), \
+ SR(DOMAIN19_PG_STATUS), \
+ SR(DC_IP_REQUEST_CNTL), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_HW_CONTROL),\
+ SR(DMU_CLK_CNTL)
+
+#endif /* _DCN36_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 09f5b8b40791..875ae97489d3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -50,7 +50,7 @@
#include "dml/display_mode_vba.h"
#include "dcn401/dcn401_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "link_enc_cfg.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -70,14 +70,10 @@
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml/dcn401/dcn401_fpu.h"
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
-
-#include "spl/dc_spl_scl_easf_filters.h"
-#include "spl/dc_spl_isharp_filters.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -712,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.use_max_lb = true,
.force_disable_subvp = false,
+ .disable_force_pstate_allow_on_hw_release = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = true,
.using_dml21 = true,
@@ -724,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
- .enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
.fams_version = {
.minor = 1,
@@ -737,7 +733,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_stall_recovery = true,
}
},
- .force_cositing = CHROMA_COSITING_TOPLEFT + 1,
+ .force_cositing = CHROMA_COSITING_NONE + 1,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
};
static struct dce_aux *dcn401_aux_engine_create(
@@ -1297,6 +1297,29 @@ static struct hpo_dp_link_encoder *dcn401_hpo_dp_link_encoder_create(
return &hpo_dp_enc31->base;
}
+static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+{
+ unsigned int num_available_chans = 1;
+
+ /* channels for MALL must be a power of 2 */
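+ /* e.g. (illustrative): 12 channels round down to 8, 6 round down to 4 */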
+ while (num_chans > 1) {
+ num_available_chans = (num_available_chans << 1);
+ num_chans = (num_chans >> 1);
+ }
+
+ /* cannot be odd */
+ num_available_chans &= ~1;
+
+ /* clamp to max available channels for MALL per ASIC */
+ if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
+ } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
+ }
+
+ return num_available_chans;
+}
+
static struct dce_hwseq *dcn401_hwseq_create(
struct dc_context *ctx)
{
@@ -1588,41 +1611,78 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
+ /* re-calculate the available MALL size if required */
+ if (bw_params->num_channels > 0) {
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
+ dc, bw_params->num_channels) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+ }
DC_FP_START();
- dcn401_update_bw_bounding_box_fpu(dc, bw_params);
-
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
+ plane_state->tiling_info.gfxversion = DcGfxAddr3;
plane_state->tiling_info.gfx_addr3.swizzle = DC_ADDR3_SW_64KB_2D;
return DC_OK;
}
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status = DC_OK;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
+
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
- return out;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
void dcn401_prepare_mcache_programming(struct dc *dc,
@@ -1637,12 +1697,16 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct pixel_clk_params *pixel_clk_params = &pipe_ctx->stream_res.pix_clk_params;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
@@ -1708,27 +1772,9 @@ static int dcn401_get_power_profile(const struct dc_state *context)
return dpm_level;
}
-static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+static unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
{
- unsigned int num_available_chans = 1;
-
- /* channels for MALL must be a power of 2 */
- while (num_chans > 1) {
- num_available_chans = (num_available_chans << 1);
- num_chans = (num_chans >> 1);
- }
-
- /* cannot be odd */
- num_available_chans &= ~1;
-
- /* clamp to max available channels for MALL per ASIC */
- if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
- } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
- }
-
- return num_available_chans;
+ return pipe_ctx->global_sync.dcn4x.vstartup_lines;
}
static struct resource_funcs dcn401_res_pool_funcs = {
@@ -1758,6 +1804,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1833,8 +1881,9 @@ static bool dcn401_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 95;
dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/
- /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ /* used to set cursor pitch, so must be aligned to power of 2 (HW actually supported 78x78) */
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64;
dc->caps.cursor_not_scaled = true;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
@@ -1857,9 +1906,9 @@ static bool dcn401_resource_construct(
dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
@@ -1887,8 +1936,8 @@ static bool dcn401_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1906,11 +1955,34 @@ static bool dcn401_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
dc->config.use_spl = true;
dc->config.prefer_easf = true;
+
+ dc->config.dcn_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.sdr_rgb_max = 1750;
+ dc->config.dcn_sharpness_range.sdr_rgb_mid = 750;
+ dc->config.dcn_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500;
+
+ dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1926,6 +1998,7 @@ static bool dcn401_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2125,6 +2198,8 @@ static bool dcn401_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2143,7 +2218,6 @@ static bool dcn401_resource_construct(
dc->config.sdpif_request_limit_words_per_umc = 16;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
@@ -2174,10 +2248,12 @@ static bool dcn401_resource_construct(
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
/* SPL */
- spl_init_easf_filter_coeffs();
- spl_init_blur_scale_coeffs();
dc->caps.scl_caps.sharpener_support = true;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 19568c359669..e1fa2e80a15a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -22,9 +22,9 @@ struct resource_pool *dcn401_create_resource_pool(
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
@@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
- SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
+ SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
@@ -226,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
#define LE_DCN401_REG_LIST_RI(id) \
LE_DCN3_REG_LIST_RI(id), \
SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SRI_ARR(DIG_BE_CLK_CNTL, DIG, id)
+ SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\
+ SR_ARR(DIO_CLK_CNTL, id)
/* DPP */
#define DPP_REG_LIST_DCN401_COMMON_RI(id) \
@@ -538,7 +540,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \
- SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst), \
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
new file mode 100644
index 000000000000..bc93356a0b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+# Makefile for bounding box component.
+# Floating point required due to nature of bounding box values
+
+soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
+soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
+
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+
+soc_and_ip_translator := soc_and_ip_translator.o
+soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+
+AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator)
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
new file mode 100644
index 000000000000..3190c76eb482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn4_soc_bb.h"
+
+/* soc_and_ip_translator component used to get up-to-date values for bounding box.
+ * Bounding box values are stored in several locations and locations can vary with DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
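+ *
+ * Typical usage (illustrative sketch only):
+ *   translator = dc_create_soc_and_ip_translator(dc_version);
+ *   translator->translator_funcs->get_soc_bb(&soc_bb, dc, config);
+ *   translator->translator_funcs->get_ip_caps(&ip_caps);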
+ */
+
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb)
+{
+ memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+}
+
+/*
+ * DC clock table is obtained from SMU during runtime.
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * Our clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into the dc clock table at runtime.
+ * This function overrides the dummy placeholder values with the real values
+ * from the dc clock table.
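+ *
+ * Illustrative example (the numbers are assumptions, not real SMU data): an
+ * SMU entry of dcfclk_mhz = 800 becomes 800000 kHz in the DML table; if
+ * use_clock_dc_limits is set and dc_mode_limit.dcfclk_mhz = 600, that level
+ * is replaced by 600000 kHz and the table is truncated at that entry.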
+ */
+static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ const struct clk_bw_params *dc_bw_params,
+ bool use_clock_dc_limits)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table,
+ dc->clk_mgr->bw_params,
+ config->use_clock_dc_limits);
+ }
+}
+
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+
+ /* latencies in vbios are platform specific and should be used if provided */
+ if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0;
+}
+
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ /* set if the value is provided */
+ if (dc->bb_overrides.sr_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.fclk_clock_change_latency_ns)
+ soc_bb->power_management_parameters.fclk_change_blackout_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+
+ //Z8 values not expected nor used on DCN401 but still added for completeness
+ if (dc->bb_overrides.sr_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* An individual value may be overwritten even if it was already set by a previous function.
+ * Values are applied in order of priority (lowest to highest).
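+ * e.g. clk_mgr clock values are applied first, then vbios latencies, and finally
+ * software-policy overrides from dc->bb_overrides, which take precedence over any
+ * earlier value for the same field (this mirrors the call order below).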
+ */
+ dc_assert_fp_enabled();
+
+ dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
+static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn401_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn401_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn401_get_ip_caps,
+};
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
new file mode 100644
index 000000000000..88c11b6be004
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN401_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "soc_and_ip_translator.h"
+#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h"
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+/* Functions that can be re-used by higher DCN revisions of this component */
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+
+#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
new file mode 100644
index 000000000000..c9e224d262c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn42_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn42_soc_bb.h"
+
+/* soc_and_ip_translator component used to get up-to-date values for bounding box.
+ * Bounding box values are stored in several locations and locations can vary with DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
+ */
+
+static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn42_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn42_get_ip_caps,
+};
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
new file mode 100644
index 000000000000..914dcbb369a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN42_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "dml_top_soc_parameter_types.h"
+#include "soc_and_ip_translator.h"
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
new file mode 100644
index 000000000000..0fc0e5a6c171
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+
+static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
+ enum dce_version dc_version)
+{
+ switch (dc_version) {
+ case DCN_VERSION_4_01:
+ dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
+ default:
+ break;
+ }
+}
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version)
+{
+ struct soc_and_ip_translator *soc_and_ip_translator;
+
+ soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL);
+ if (!soc_and_ip_translator)
+ return NULL;
+
+ dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version);
+
+ return soc_and_ip_translator;
+}
+
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator)
+{
+ kfree(*soc_and_ip_translator);
+ *soc_and_ip_translator = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
deleted file mode 100644
index 02a2d6725ed5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DC_SPL_H__
-#define __DC_SPL_H__
-
-#include "dc_spl_types.h"
-#define BLACK_OFFSET_RGB_Y 0x0
-#define BLACK_OFFSET_CBCR 0x8000
-
-/* SPL interfaces */
-
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
-
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out);
-
-#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
deleted file mode 100644
index 48202bc4f81e..000000000000
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DC_SPL_SCL_FILTERS_H__
-#define __DC_SPL_SCL_FILTERS_H__
-
-#include "dc_spl_types.h"
-
-const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_2tap_16p(void);
-const uint16_t *spl_get_filter_2tap_64p(void);
-const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
-
-#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/sspl/Makefile
index 5edf3c6cf3e2..5e3e4aa13820 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/sspl/Makefile
@@ -25,7 +25,7 @@
SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o
-AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
+AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/sspl/,$(SPL))
AMD_DISPLAY_FILES += $(AMD_DAL_SPL)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 1306ce0321e2..7a839984dbc0 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -3,14 +3,48 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl.h"
-#include "dc_spl_scl_filters.h"
#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u3d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
+static bool spl_is_yuv420(enum spl_pixel_format format)
+{
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
+ return true;
+
+ return false;
+}
+
+static bool spl_is_video_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= SPL_PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool spl_is_subsampled_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN
+ && format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END)
+ return true;
+ else
+ return false;
+}
+
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
{
struct spl_rect rec;
@@ -41,6 +75,21 @@ static struct spl_rect shift_rec(const struct spl_rect *rec_in, int x, int y)
return rec_out;
}
+static void spl_opp_adjust_rect(struct spl_rect *rec, const struct spl_opp_adjust *adjust)
+{
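+	/* apply an adjustment only if it keeps the origin non-negative
+	 * and the width/height at least 1
+	 */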
+ if ((rec->x + adjust->x) >= 0)
+ rec->x += adjust->x;
+
+ if ((rec->y + adjust->y) >= 0)
+ rec->y += adjust->y;
+
+ if ((rec->width + adjust->width) >= 1)
+ rec->width += adjust->width;
+
+ if ((rec->height + adjust->height) >= 1)
+ rec->height += adjust->height;
+}
+
static struct spl_rect calculate_plane_rec_in_timing_active(
struct spl_in *spl_in,
const struct spl_rect *rec_in)
@@ -147,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- if (use_recout_width_aligned) {
+ if (spl_in->basic_in.custom_width != 0) {
+ mpc_rec.width = spl_in->basic_in.custom_width;
+ mpc_rec.x = spl_in->basic_in.custom_x;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ } else if (use_recout_width_aligned) {
mpc_rec.width = recout_width_align;
if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
mpc_rec.width = plane_clip_rec->width % recout_width_align;
@@ -170,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
/* extra pixels in the division remainder need to go to pipes after
* the extra pixel index minus one(epimo) defined here as:
*/
- if (mpc_slice_idx > epimo) {
+ if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
@@ -203,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
odm_rec.x = odm_slice_width * odm_slice_idx;
odm_rec.width = is_last_odm_slice ?
- /* last slice width is the reminder of h_active */
- h_active - odm_slice_width * (odm_slice_count - 1) :
- /* odm slice width is the floor of h_active / count */
- odm_slice_width;
+			/* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
odm_rec.y = 0;
odm_rec.height = v_active;
@@ -408,8 +462,7 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
- if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
spl_scratch->scl_data.ratios.horz_c.value /= 2;
spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
@@ -546,41 +599,6 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
*vp_offset = src_size - *vp_offset - *vp_size;
}
-static bool spl_is_yuv420(enum spl_pixel_format format)
-{
- if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
- (format <= SPL_PIXEL_FORMAT_420BPP10))
- return true;
-
- return false;
-}
-
-static bool spl_is_rgb8(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_ARGB8888)
- return true;
-
- return false;
-}
-
-static bool spl_is_video_format(enum spl_pixel_format format)
-{
- if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
- && format <= SPL_PIXEL_FORMAT_VIDEO_END)
- return true;
- else
- return false;
-}
-
-static bool spl_is_subsampled_format(enum spl_pixel_format format)
-{
- if (format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN
- && format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END)
- return true;
- else
- return false;
-}
-
/*Calculate inits and viewport */
static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_scratch *spl_scratch)
@@ -591,8 +609,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_rect recout_clip_in_recout_dst;
struct spl_rect overlap_in_active_timing;
struct spl_rect odm_slice = calculate_odm_slice_in_timing_active(spl_in);
- int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ int vpc_div = spl_is_subsampled_format(spl_in->basic_in.format) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
@@ -620,28 +637,23 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
- if (orthogonal_rotation) {
- spl_swap(src.width, src.height);
- spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
- }
-
if (spl_is_subsampled_format(spl_in->basic_in.format)) {
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
- int sign = 1;
+ int h_sign = flip_horz_scan_dir ? -1 : 1;
+ int v_sign = flip_vert_scan_dir ? -1 : 1;
switch (spl_in->basic_in.cositing) {
-
+ case CHROMA_COSITING_TOPLEFT:
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(v_sign, 4);
+ break;
case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_zero;
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
+ init_adj_v = spl_fixpt_zero;
break;
case CHROMA_COSITING_NONE:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
- break;
- case CHROMA_COSITING_TOPLEFT:
default:
init_adj_h = spl_fixpt_zero;
init_adj_v = spl_fixpt_zero;
@@ -649,6 +661,12 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
}
}
+ if (orthogonal_rotation) {
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(init_adj_h, init_adj_v);
+ }
+
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
@@ -724,13 +742,15 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
}
}
-static void spl_clamp_viewport(struct spl_rect *viewport)
+static void spl_clamp_viewport(struct spl_rect *viewport, int min_viewport_size)
{
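+	/* a min_viewport_size of 0 means the caller did not specify one, so use the default */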
+ if (min_viewport_size == 0)
+ min_viewport_size = MIN_VIEWPORT_SIZE;
/* Clamp minimum viewport size */
- if (viewport->height < MIN_VIEWPORT_SIZE)
- viewport->height = MIN_VIEWPORT_SIZE;
- if (viewport->width < MIN_VIEWPORT_SIZE)
- viewport->width = MIN_VIEWPORT_SIZE;
+ if (viewport->height < min_viewport_size)
+ viewport->height = min_viewport_size;
+ if (viewport->width < min_viewport_size)
+ viewport->width = min_viewport_size;
}
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
@@ -761,32 +781,20 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
* Do not bypass UV at 1:1 for cositing to be applied
*/
if (!enable_isharp) {
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
+static void spl_choose_lls_policy(enum spl_pixel_format format,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_video_format(format)) {
+ if (spl_is_subsampled_format(format))
*lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_BYPASS)) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
+ else /* RGB or YUV444 */
+ *lls_pref = LLS_PREF_YES;
}
/* Enable EASF ?*/
@@ -795,7 +803,6 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
int vratio = 0;
int hratio = 0;
bool skip_easf = false;
- bool lls_enable_easf = true;
if (spl_in->disable_easf)
skip_easf = true;
@@ -811,17 +818,13 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
skip_easf = true;
/*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format
+ * to determine whether to use LINEAR or NONLINEAR scaling
*/
if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ spl_choose_lls_policy(spl_in->basic_in.format,
&spl_in->lls_pref);
- if (!lls_enable_easf)
- skip_easf = true;
-
/* Check for linear scaling or EASF preferred */
if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
skip_easf = true;
@@ -886,8 +889,12 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+ struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps,
+ bool is_subsampled)
{
+ bool check_max_downscale = false;
+
if (in_taps->h_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
@@ -927,13 +934,31 @@ static void spl_get_taps_non_adaptive_scaler(
else
spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+ /*
+	 * Max downscale supported is 6.0x. Add ASSERT to catch if we go beyond that
+ */
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+
+
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
}
@@ -945,16 +970,16 @@ static bool spl_get_optimal_number_of_taps(
bool *enable_isharp)
{
int num_part_y, num_part_c;
- int max_taps_y, max_taps_c;
- int min_taps_y, min_taps_c;
+ unsigned int max_taps_y, max_taps_c;
+ unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
+ bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -963,7 +988,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -978,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps(
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ if (skip_easf) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
+ }
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -992,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps_c = 6;
spl_scratch->scl_data.taps.v_taps_c = 6;
}
+
+ /* Override mode: keep EASF enabled but use input taps if valid */
+ if (spl_in->override_easf) {
+ spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps;
+ spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps;
+ spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c;
+ spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c;
+
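+			/* more than 6 taps disables EASF; odd horizontal tap counts
+			 * above 1 are rounded down to even
+			 */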
+ if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6))
+ skip_easf = true;
+ if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2))
+ spl_scratch->scl_data.taps.h_taps--;
+ if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2))
+ spl_scratch->scl_data.taps.h_taps_c--;
+ }
}
/*Ensure we can support the requested number of vtaps*/
@@ -1008,12 +1049,18 @@ static bool spl_get_optimal_number_of_taps(
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2) > num_part_y)
+ max_taps_y = 0;
+ else
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2) > num_part_c)
+ max_taps_c = 0;
+ else
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
@@ -1099,7 +1146,6 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
-
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
@@ -1124,6 +1170,7 @@ static bool spl_get_optimal_number_of_taps(
if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
}
return true;
@@ -1274,7 +1321,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 0;
+ dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1438,7 +1485,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 0;
+ dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
@@ -1599,7 +1646,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
0x0; // fp1.5.10, C3 coefficient
}
- if (spl_is_video_format(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ if (spl_is_subsampled_format(format)) { /* TODO: 0 = RGB, 1 = YUV */
dscl_prog_data->easf_matrix_mode = 1;
/*
* 2-bit, BF3 chroma mode correction calculation mode
@@ -1782,6 +1829,8 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
spl_calculate_recout(spl_in, spl_scratch, spl_out);
/* depends on pixel format */
spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out);
+ /* Adjust recout for opp if needed */
+ spl_opp_adjust_rect(&spl_scratch->scl_data.recout, &spl_in->basic_in.opp_recout_adjust);
/* depends on scaling ratios and recout, does not calculate offset yet */
spl_calculate_viewport_size(spl_in, spl_scratch);
@@ -1793,7 +1842,7 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
}
/* Calculate scaler parameters */
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
@@ -1818,7 +1867,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Handle 3d recout
spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_scratch.scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport, spl_in->min_viewport_size);
// Save all calculated parameters in dscl_prog_data structure to program hw registers
spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
@@ -1858,7 +1907,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
}
/* External interface to get number of taps only */
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
@@ -1873,3 +1922,4 @@ bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out)
spl_set_taps_data(dscl_prog_data, data);
return res;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
new file mode 100644
index 000000000000..d621c42a237e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DC_SPL_H__
+#define __DC_SPL_H__
+
+#include "dc_spl_types.h"
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
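+/* SPL_PFX_ can be defined externally to prefix the exported SPL symbols;
+ * SPL_NAMESPACE() pastes that prefix onto a symbol name.
+ */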
+#ifndef SPL_PFX_
+#define SPL_PFX_
+#endif
+
+#define SPL_EXPAND2(a, b) a##b
+#define SPL_EXPAND(a, b) SPL_EXPAND2(a, b)
+#define SPL_NAMESPACE(symbol) SPL_EXPAND(SPL_PFX_, symbol)
+
+
+/* SPL interfaces */
+
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out));
+
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out));
+
+#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c
index 99238644e0a1..99238644e0a1 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h
index 20439cdbdb10..20439cdbdb10 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
index e0572252c640..12acdd34e6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
@@ -11,232 +11,6 @@
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
-// A_gain = 2.000000
-// B_start = 11.000000
-// B_end = 86.000000
-// C_start = 40.000000
-// C_end = 64.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = {
-0x02010000,
-0x0A070503,
-0x1614100D,
-0x1C1B1918,
-0x22211F1E,
-0x27262423,
-0x2A2A2928,
-0x2D2D2C2B,
-0x302F2F2E,
-0x31313030,
-0x31313131,
-0x31313131,
-0x30303031,
-0x292D2F2F,
-0x191D2125,
-0x050A0F14,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 0.500000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-
-static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = {
-0x00000000,
-0x02020101,
-0x06050403,
-0x07070606,
-0x09080808,
-0x0A0A0A09,
-0x0C0B0B0B,
-0x0D0D0C0C,
-0x0E0E0D0D,
-0x0F0F0E0E,
-0x100F0F0F,
-0x10101010,
-0x11111010,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x10101111,
-0x10101010,
-0x0F0F0F10,
-0x0E0E0F0F,
-0x0D0D0E0E,
-0x0C0C0D0D,
-0x0B0B0B0C,
-0x090A0A0A,
-0x08080809,
-0x06060707,
-0x04050506,
-0x02030304,
-0x00010102,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 1.000000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = {
-0x01000000,
-0x05040302,
-0x0B0A0806,
-0x0E0E0D0C,
-0x1211100F,
-0x15141312,
-0x17171615,
-0x1A191918,
-0x1C1B1B1A,
-0x1E1D1D1C,
-0x1F1F1E1E,
-0x2020201F,
-0x21212121,
-0x22222222,
-0x23232222,
-0x23232323,
-0x23232323,
-0x22222323,
-0x22222222,
-0x21212121,
-0x1F202020,
-0x1E1E1F1F,
-0x1C1D1D1E,
-0x1A1B1B1C,
-0x1819191A,
-0x15161717,
-0x12131415,
-0x0F101112,
-0x0C0D0E0E,
-0x08090A0B,
-0x04050607,
-0x00010203,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 1.500000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = {
-0x01010000,
-0x07050402,
-0x110F0C0A,
-0x16141312,
-0x1B191817,
-0x1F1E1D1C,
-0x23222120,
-0x26262524,
-0x2A292827,
-0x2C2C2B2A,
-0x2F2E2E2D,
-0x3130302F,
-0x32323131,
-0x33333332,
-0x34343433,
-0x34343434,
-0x34343434,
-0x33343434,
-0x32333333,
-0x31313232,
-0x2F303031,
-0x2D2E2E2F,
-0x2A2B2C2C,
-0x2728292A,
-0x24252626,
-0x20212223,
-0x1C1D1E1F,
-0x1718191B,
-0x12131416,
-0x0C0E0F10,
-0x0608090B,
-0x00020305
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 2.000000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 40.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = {
-0x02010000,
-0x0A070503,
-0x1614100D,
-0x1D1B1A18,
-0x2322201F,
-0x29282625,
-0x2F2D2C2B,
-0x33323130,
-0x38373534,
-0x3B3A3938,
-0x3E3E3D3C,
-0x4140403F,
-0x43424241,
-0x44444443,
-0x45454545,
-0x46454545,
-0x45454546,
-0x45454545,
-0x43444444,
-0x41424243,
-0x3F404041,
-0x3C3D3E3E,
-0x38393A3B,
-0x34353738,
-0x30313233,
-0x2B2C2D2F,
-0x25262829,
-0x1F202223,
-0x181A1B1D,
-0x10121416,
-0x080B0D0E,
-0x00020406,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
// A_gain = 3.000000
// B_start = 11.000000
// B_end = 127.000000
@@ -278,52 +52,6 @@ static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = {
0x0003060A,
};
-//========================================
-// Wide scaler coefficients
-//========================================================
-// <using> gen_scaler_coeffs.m
-// <date> 15-Dec-2021
-// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
-// <num_taps> 6
-// <num_phases> 64
-// <CoefType> LanczosEd
-// <CoefQuant> S1.10
-//========================================================
-static const uint16_t filter_isharp_wide_6tap_64p[198] = {
-0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
-0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
-0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
-0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
-0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
-0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
-0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
-0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
-0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
-0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
-0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
-0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
-0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
-0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
-0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
-0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
-0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
-0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
-0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
-0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
-0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
-0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
-0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
-0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
-0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
-0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
-0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
-0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
-0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
-0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
-0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
-0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
-0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019
-};
// Blur and scale coefficients
//========================================================
// <using> gen_BlurScale_coeffs.m
@@ -456,9 +184,113 @@ static const uint16_t filter_isharp_bs_3tap_64p[99] = {
};
/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */
-static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
-static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
-static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
+static const uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198] = {
+0x0000, 0x0394, 0x08dc, 0x0390, 0x0000, 0x0000,
+0x0000, 0x0378, 0x08dc, 0x03ac, 0x0000, 0x0000,
+0x0000, 0x035c, 0x08d8, 0x03c8, 0x0004, 0x0000,
+0x0000, 0x0340, 0x08d4, 0x03e8, 0x0004, 0x0000,
+0x0000, 0x0324, 0x08d0, 0x0404, 0x0008, 0x0000,
+0x0000, 0x0308, 0x08cc, 0x0420, 0x000c, 0x0000,
+0x0000, 0x02ec, 0x08c8, 0x0440, 0x000c, 0x0000,
+0x0000, 0x02d4, 0x08c0, 0x045c, 0x0010, 0x0000,
+0x0000, 0x02b8, 0x08b8, 0x047c, 0x0014, 0x0000,
+0x0000, 0x02a0, 0x08b0, 0x0498, 0x0018, 0x0000,
+0x0000, 0x0288, 0x08a8, 0x04b4, 0x001c, 0x0000,
+0x0000, 0x0270, 0x08a0, 0x04d0, 0x0020, 0x0000,
+0x0000, 0x0258, 0x0894, 0x04f0, 0x0024, 0x0000,
+0x0000, 0x0240, 0x0888, 0x050c, 0x002c, 0x0000,
+0x0000, 0x0228, 0x087c, 0x052c, 0x0030, 0x0000,
+0x0000, 0x0214, 0x0870, 0x0544, 0x0038, 0x0000,
+0x0000, 0x01fc, 0x0860, 0x0568, 0x003c, 0x0000,
+0x0000, 0x01e8, 0x0854, 0x0580, 0x0044, 0x0000,
+0x0000, 0x01d0, 0x0844, 0x05a0, 0x004c, 0x0000,
+0x0000, 0x01bc, 0x0834, 0x05bc, 0x0054, 0x0000,
+0x0000, 0x01a8, 0x0824, 0x05d8, 0x005c, 0x0000,
+0x0000, 0x0194, 0x0810, 0x05f8, 0x0064, 0x0000,
+0x0000, 0x0180, 0x0800, 0x0614, 0x006c, 0x0000,
+0x0000, 0x0170, 0x07ec, 0x0630, 0x0074, 0x0000,
+0x0000, 0x015c, 0x07d8, 0x064c, 0x0080, 0x0000,
+0x0000, 0x014c, 0x07c4, 0x0668, 0x0088, 0x0000,
+0x0000, 0x0138, 0x07b0, 0x0684, 0x0094, 0x0000,
+0x0000, 0x0128, 0x0798, 0x06a0, 0x00a0, 0x0000,
+0x0000, 0x0118, 0x0784, 0x06bc, 0x00a8, 0x0000,
+0x0000, 0x0108, 0x076c, 0x06d8, 0x00b4, 0x0000,
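+/* DCN4.2 reuses the DCN4.01 SoC bounding box query and supplies its own IP capabilities */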
+0x0000, 0x00fc, 0x0754, 0x06ec, 0x00c4, 0x0000,
+0x0000, 0x00ec, 0x073c, 0x0708, 0x00d0, 0x0000,
+0x0000, 0x00dc, 0x0724, 0x0724, 0x00dc, 0x0000,
+};
+
+static const uint16_t filter_isharp_bs_4tap_64p_s1_12[132] = {
+0x0394, 0x08dc, 0x0390, 0x0000,
+0x0378, 0x08dc, 0x03ac, 0x0000,
+0x035c, 0x08d8, 0x03c8, 0x0004,
+0x0340, 0x08d4, 0x03e8, 0x0004,
+0x0324, 0x08d0, 0x0404, 0x0008,
+0x0308, 0x08cc, 0x0420, 0x000c,
+0x02ec, 0x08c8, 0x0440, 0x000c,
+0x02d4, 0x08c0, 0x045c, 0x0010,
+0x02b8, 0x08b8, 0x047c, 0x0014,
+0x02a0, 0x08b0, 0x0498, 0x0018,
+0x0288, 0x08a8, 0x04b4, 0x001c,
+0x0270, 0x08a0, 0x04d0, 0x0020,
+0x0258, 0x0894, 0x04f0, 0x0024,
+0x0240, 0x0888, 0x050c, 0x002c,
+0x0228, 0x087c, 0x052c, 0x0030,
+0x0214, 0x0870, 0x0544, 0x0038,
+0x01fc, 0x0860, 0x0568, 0x003c,
+0x01e8, 0x0854, 0x0580, 0x0044,
+0x01d0, 0x0844, 0x05a0, 0x004c,
+0x01bc, 0x0834, 0x05bc, 0x0054,
+0x01a8, 0x0824, 0x05d8, 0x005c,
+0x0194, 0x0810, 0x05f8, 0x0064,
+0x0180, 0x0800, 0x0614, 0x006c,
+0x0170, 0x07ec, 0x0630, 0x0074,
+0x015c, 0x07d8, 0x064c, 0x0080,
+0x014c, 0x07c4, 0x0668, 0x0088,
+0x0138, 0x07b0, 0x0684, 0x0094,
+0x0128, 0x0798, 0x06a0, 0x00a0,
+0x0118, 0x0784, 0x06bc, 0x00a8,
+0x0108, 0x076c, 0x06d8, 0x00b4,
+0x00fc, 0x0754, 0x06ec, 0x00c4,
+0x00ec, 0x073c, 0x0708, 0x00d0,
+0x00dc, 0x0724, 0x0724, 0x00dc,
+};
+
+static const uint16_t filter_isharp_bs_3tap_64p_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07b0, 0x082c, 0x0024,
+0x0788, 0x0844, 0x0034,
+0x0760, 0x0858, 0x0048,
+0x0738, 0x0870, 0x0058,
+0x0710, 0x0884, 0x006c,
+0x06e8, 0x0898, 0x0080,
+0x06c0, 0x08a8, 0x0098,
+0x0698, 0x08bc, 0x00ac,
+0x0670, 0x08cc, 0x00c4,
+0x0648, 0x08e0, 0x00d8,
+0x0620, 0x08f0, 0x00f0,
+0x05f8, 0x0900, 0x0108,
+0x05d0, 0x0910, 0x0120,
+0x05a8, 0x0920, 0x0138,
+0x0584, 0x0928, 0x0154,
+0x055c, 0x0938, 0x016c,
+0x0534, 0x0944, 0x0188,
+0x0510, 0x094c, 0x01a4,
+0x04e8, 0x0958, 0x01c0,
+0x04c4, 0x0960, 0x01dc,
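+	/* selects the DCN-specific function table, where one exists for this display version */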
+0x049c, 0x096c, 0x01f8,
+0x0478, 0x0970, 0x0218,
+0x0454, 0x0978, 0x0234,
+0x042c, 0x0980, 0x0254,
+0x0408, 0x0988, 0x0270,
+0x03e4, 0x098c, 0x0290,
+0x03c0, 0x0990, 0x02b0,
+0x039c, 0x0994, 0x02d0,
+0x037c, 0x0990, 0x02f4,
+0x0358, 0x0994, 0x0314,
+0x0334, 0x0998, 0x0334,
+};
/* Pre-generated 1DLUT for given setup and sharpness level */
struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = {
@@ -509,47 +341,6 @@ struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_
{1, 1, 5},
};
-const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
-{
- return filter_isharp_1D_lut_0;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void)
-{
- return filter_isharp_1D_lut_0p5x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void)
-{
- return filter_isharp_1D_lut_1p0x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void)
-{
- return filter_isharp_1D_lut_1p5x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
-{
- return filter_isharp_1D_lut_2p0x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
-{
- return filter_isharp_1D_lut_3p0x;
-}
-const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
-{
- return filter_isharp_wide_6tap_64p;
-}
-uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
-{
- return filter_isharp_bs_4tap_in_6_64p_s1_12;
-}
-uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
-{
- return filter_isharp_bs_4tap_64p_s1_12;
-}
-uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
-{
- return filter_isharp_bs_3tap_64p_s1_12;
-}
-
static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio)
{
int j;
@@ -589,7 +380,7 @@ static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 rati
}
static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio,
- int discrete_sharpness_level, enum system_setup setup,
+ unsigned int discrete_sharpness_level, enum system_setup setup,
struct spl_sharpness_range sharpness_range,
enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
@@ -720,24 +511,29 @@ uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup)
return filter_isharp_1D_lut_pregen[setup].value;
}
-void spl_init_blur_scale_coeffs(void)
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
{
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
- filter_isharp_bs_3tap_64p_s1_12, 3);
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
- filter_isharp_bs_4tap_64p_s1_12, 4);
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
- filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
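+	/* tables are stored pre-converted to S1.12, so no runtime conversion pass is needed */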
+ if (taps == 3)
+ return filter_isharp_bs_3tap_64p_s1_12;
+ else if (taps == 4)
+ return filter_isharp_bs_4tap_64p_s1_12;
+ else if (taps == 6)
+ return filter_isharp_bs_4tap_in_6_64p_s1_12;
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
}
-uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p_s1_10(int taps)
{
if (taps == 3)
- return spl_get_filter_isharp_bs_3tap_64p();
+ return filter_isharp_bs_3tap_64p;
else if (taps == 4)
- return spl_get_filter_isharp_bs_4tap_64p();
+ return filter_isharp_bs_4tap_64p;
else if (taps == 6)
- return spl_get_filter_isharp_bs_4tap_in_6_64p();
+ return filter_isharp_bs_4tap_in_6_64p;
else {
/* should never happen, bug */
SPL_BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h
index 89af91e19b6c..f5e3d3ecc913 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h
@@ -7,18 +7,6 @@
#include "dc_spl_types.h"
-const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
-uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
-uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
-uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
-const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
-uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
-
#define NUM_SHARPNESS_ADJ_LEVELS 6
struct scale_ratio_to_sharpness_level_adj {
unsigned int ratio_numer;
@@ -40,11 +28,15 @@ enum system_setup {
NUM_SHARPNESS_SETUPS
};
-void spl_init_blur_scale_coeffs(void);
void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data);
void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy);
uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
+
+// public API
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p_s1_10(int taps);
+
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c
index 09bf82f7d468..0d1bd81ff04a 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c
@@ -1136,32 +1136,871 @@ static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = {
};
/* Converted scaler coeff tables from S1.10 to S1.12 */
-static uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99];
-static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132];
-static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198];
-
-struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
+static const uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07b0, 0x082c, 0x0024,
+0x0788, 0x0844, 0x0034,
+0x0760, 0x0858, 0x0048,
+0x0738, 0x0870, 0x0058,
+0x0710, 0x0884, 0x006c,
+0x06e8, 0x0898, 0x0080,
+0x06c0, 0x08a8, 0x0098,
+0x0698, 0x08bc, 0x00ac,
+0x0670, 0x08cc, 0x00c4,
+0x0648, 0x08e0, 0x00d8,
+0x0620, 0x08f0, 0x00f0,
+0x05f8, 0x0900, 0x0108,
+0x05d0, 0x0910, 0x0120,
+0x05a8, 0x0920, 0x0138,
+0x0584, 0x0928, 0x0154,
+0x055c, 0x0938, 0x016c,
+0x0534, 0x0944, 0x0188,
+0x0510, 0x094c, 0x01a4,
+0x04e8, 0x0958, 0x01c0,
+0x04c4, 0x0960, 0x01dc,
+0x049c, 0x096c, 0x01f8,
+0x0478, 0x0970, 0x0218,
+0x0454, 0x0978, 0x0234,
+0x042c, 0x0980, 0x0254,
+0x0408, 0x0988, 0x0270,
+0x03e4, 0x098c, 0x0290,
+0x03c0, 0x0990, 0x02b0,
+0x039c, 0x0994, 0x02d0,
+0x037c, 0x0990, 0x02f4,
+0x0358, 0x0994, 0x0314,
+0x0334, 0x0998, 0x0334,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07ac, 0x0838, 0x001c,
+0x0784, 0x0850, 0x002c,
+0x075c, 0x0868, 0x003c,
+0x0734, 0x0880, 0x004c,
+0x0708, 0x0898, 0x0060,
+0x06e0, 0x08b0, 0x0070,
+0x06b8, 0x08c4, 0x0084,
+0x068c, 0x08dc, 0x0098,
+0x0664, 0x08f0, 0x00ac,
+0x063c, 0x0900, 0x00c4,
+0x0614, 0x0914, 0x00d8,
+0x05e8, 0x0928, 0x00f0,
+0x05c0, 0x093c, 0x0104,
+0x0598, 0x094c, 0x011c,
+0x0570, 0x095c, 0x0134,
+0x0548, 0x0968, 0x0150,
+0x0520, 0x0978, 0x0168,
+0x04f8, 0x0984, 0x0184,
+0x04d0, 0x0990, 0x01a0,
+0x04ac, 0x0998, 0x01bc,
+0x0484, 0x09a4, 0x01d8,
+0x045c, 0x09b0, 0x01f4,
+0x0438, 0x09b8, 0x0210,
+0x0410, 0x09c0, 0x0230,
+0x03ec, 0x09c4, 0x0250,
+0x03c8, 0x09c8, 0x0270,
+0x03a4, 0x09cc, 0x0290,
+0x0380, 0x09d0, 0x02b0,
+0x035c, 0x09d4, 0x02d0,
+0x0338, 0x09d4, 0x02f4,
+0x0314, 0x09d8, 0x0314,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d4, 0x0824, 0x0008,
+0x07a8, 0x0844, 0x0014,
+0x077c, 0x0868, 0x001c,
+0x0750, 0x0888, 0x0028,
+0x0724, 0x08a8, 0x0034,
+0x06f8, 0x08c8, 0x0040,
+0x06cc, 0x08e4, 0x0050,
+0x06a0, 0x0904, 0x005c,
+0x0674, 0x0920, 0x006c,
+0x0648, 0x093c, 0x007c,
+0x061c, 0x0954, 0x0090,
+0x05f0, 0x0970, 0x00a0,
+0x05c4, 0x0988, 0x00b4,
+0x0598, 0x09a0, 0x00c8,
+0x056c, 0x09b8, 0x00dc,
+0x0540, 0x09cc, 0x00f4,
+0x0518, 0x09e0, 0x0108,
+0x04ec, 0x09f4, 0x0120,
+0x04c0, 0x0a08, 0x0138,
+0x0498, 0x0a18, 0x0150,
+0x046c, 0x0a28, 0x016c,
+0x0444, 0x0a34, 0x0188,
+0x041c, 0x0a40, 0x01a4,
+0x03f4, 0x0a4c, 0x01c0,
+0x03cc, 0x0a58, 0x01dc,
+0x03a4, 0x0a60, 0x01fc,
+0x037c, 0x0a68, 0x021c,
+0x0354, 0x0a70, 0x023c,
+0x0330, 0x0a74, 0x025c,
+0x030c, 0x0a78, 0x027c,
+0x02e8, 0x0a78, 0x02a0,
+0x02c4, 0x0a78, 0x02c4,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d0, 0x082c, 0x0004,
+0x07a0, 0x0858, 0x0008,
+0x0770, 0x0884, 0x000c,
+0x0740, 0x08ac, 0x0014,
+0x0710, 0x08d4, 0x001c,
+0x06e0, 0x0900, 0x0020,
+0x06b0, 0x0924, 0x002c,
+0x0680, 0x094c, 0x0034,
+0x0650, 0x0970, 0x0040,
+0x0620, 0x0994, 0x004c,
+0x05f0, 0x09b8, 0x0058,
+0x05c0, 0x09dc, 0x0064,
+0x0590, 0x09fc, 0x0074,
+0x0560, 0x0a1c, 0x0084,
+0x0530, 0x0a3c, 0x0094,
+0x0500, 0x0a5c, 0x00a4,
+0x04d4, 0x0a74, 0x00b8,
+0x04a4, 0x0a90, 0x00cc,
+0x0474, 0x0aac, 0x00e0,
+0x0448, 0x0ac0, 0x00f8,
+0x041c, 0x0ad4, 0x0110,
+0x03f0, 0x0ae8, 0x0128,
+0x03c4, 0x0afc, 0x0140,
+0x0398, 0x0b0c, 0x015c,
+0x036c, 0x0b1c, 0x0178,
+0x0344, 0x0b28, 0x0194,
+0x031c, 0x0b30, 0x01b4,
+0x02f4, 0x0b38, 0x01d4,
+0x02cc, 0x0b40, 0x01f4,
+0x02a4, 0x0b48, 0x0214,
+0x0280, 0x0b48, 0x0238,
+0x025c, 0x0b48, 0x025c,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07cc, 0x0834, 0x0000,
+0x0794, 0x086c, 0x0000,
+0x0760, 0x08a0, 0x0000,
+0x072c, 0x08d4, 0x0000,
+0x06f4, 0x090c, 0x0000,
+0x06c0, 0x093c, 0x0004,
+0x0688, 0x0970, 0x0008,
+0x0654, 0x09a0, 0x000c,
+0x061c, 0x09d4, 0x0010,
+0x05e8, 0x0a00, 0x0018,
+0x05b4, 0x0a30, 0x001c,
+0x057c, 0x0a60, 0x0024,
+0x0548, 0x0a88, 0x0030,
+0x0514, 0x0ab4, 0x0038,
+0x04e0, 0x0adc, 0x0044,
+0x04ac, 0x0b00, 0x0054,
+0x0478, 0x0b28, 0x0060,
+0x0444, 0x0b4c, 0x0070,
+0x0414, 0x0b6c, 0x0080,
+0x03e0, 0x0b8c, 0x0094,
+0x03b0, 0x0ba8, 0x00a8,
+0x0380, 0x0bc4, 0x00bc,
+0x0354, 0x0bd8, 0x00d4,
+0x0324, 0x0bf0, 0x00ec,
+0x02f8, 0x0c04, 0x0104,
+0x02cc, 0x0c14, 0x0120,
+0x02a0, 0x0c24, 0x013c,
+0x0278, 0x0c30, 0x0158,
+0x0250, 0x0c38, 0x0178,
+0x0228, 0x0c40, 0x0198,
+0x0204, 0x0c40, 0x01bc,
+0x01dc, 0x0c48, 0x01dc,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07c4, 0x0840, 0x3ffc,
+0x0788, 0x0880, 0x3ff8,
+0x0748, 0x08c8, 0x3ff0,
+0x070c, 0x0904, 0x3ff0,
+0x06d0, 0x0944, 0x3fec,
+0x0690, 0x0988, 0x3fe8,
+0x0654, 0x09c4, 0x3fe8,
+0x0618, 0x0a04, 0x3fe4,
+0x05d8, 0x0a44, 0x3fe4,
+0x059c, 0x0a80, 0x3fe4,
+0x0560, 0x0ab8, 0x3fe8,
+0x0524, 0x0af4, 0x3fe8,
+0x04e8, 0x0b2c, 0x3fec,
+0x04b0, 0x0b5c, 0x3ff4,
+0x0474, 0x0b94, 0x3ff8,
+0x043c, 0x0bc4, 0x0000,
+0x0404, 0x0bf4, 0x0008,
+0x03cc, 0x0c20, 0x0014,
+0x0394, 0x0c4c, 0x0020,
+0x0360, 0x0c74, 0x002c,
+0x032c, 0x0c98, 0x003c,
+0x02f8, 0x0cbc, 0x004c,
+0x02c8, 0x0cdc, 0x005c,
+0x0298, 0x0cf8, 0x0070,
+0x0268, 0x0d14, 0x0084,
+0x023c, 0x0d28, 0x009c,
+0x0210, 0x0d3c, 0x00b4,
+0x01e4, 0x0d4c, 0x00d0,
+0x01bc, 0x0d58, 0x00ec,
+0x0194, 0x0d60, 0x010c,
+0x0170, 0x0d64, 0x012c,
+0x014c, 0x0d68, 0x014c,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07b8, 0x0850, 0x3ff8,
+0x0770, 0x08a0, 0x3ff0,
+0x0728, 0x08f0, 0x3fe8,
+0x06e4, 0x093c, 0x3fe0,
+0x069c, 0x0988, 0x3fdc,
+0x0654, 0x09d8, 0x3fd4,
+0x060c, 0x0a28, 0x3fcc,
+0x05c8, 0x0a70, 0x3fc8,
+0x0580, 0x0abc, 0x3fc4,
+0x053c, 0x0b08, 0x3fbc,
+0x04f8, 0x0b50, 0x3fb8,
+0x04b4, 0x0b94, 0x3fb8,
+0x0470, 0x0bdc, 0x3fb4,
+0x0430, 0x0c1c, 0x3fb4,
+0x03ec, 0x0c60, 0x3fb4,
+0x03b0, 0x0c9c, 0x3fb4,
+0x0370, 0x0cd8, 0x3fb8,
+0x0334, 0x0d10, 0x3fbc,
+0x02f8, 0x0d48, 0x3fc0,
+0x02c0, 0x0d78, 0x3fc8,
+0x0288, 0x0da8, 0x3fd0,
+0x0254, 0x0dd4, 0x3fd8,
+0x0220, 0x0dfc, 0x3fe4,
+0x01ec, 0x0e20, 0x3ff4,
+0x01bc, 0x0e44, 0x0000,
+0x0190, 0x0e5c, 0x0014,
+0x0164, 0x0e74, 0x0028,
+0x0138, 0x0e8c, 0x003c,
+0x0114, 0x0e98, 0x0054,
+0x00ec, 0x0ea4, 0x0070,
+0x00cc, 0x0ea8, 0x008c,
+0x00a8, 0x0eb0, 0x00a8,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07ac, 0x085c, 0x3ff8,
+0x0754, 0x08bc, 0x3ff0,
+0x0700, 0x091c, 0x3fe4,
+0x06ac, 0x0978, 0x3fdc,
+0x0658, 0x09d8, 0x3fd0,
+0x0604, 0x0a34, 0x3fc8,
+0x05b0, 0x0a94, 0x3fbc,
+0x0560, 0x0aec, 0x3fb4,
+0x0510, 0x0b44, 0x3fac,
+0x04c0, 0x0ba0, 0x3fa0,
+0x0470, 0x0bf8, 0x3f98,
+0x0424, 0x0c4c, 0x3f90,
+0x03d8, 0x0ca0, 0x3f88,
+0x0390, 0x0cf0, 0x3f80,
+0x0348, 0x0d3c, 0x3f7c,
+0x0300, 0x0d8c, 0x3f74,
+0x02c0, 0x0dd0, 0x3f70,
+0x027c, 0x0e14, 0x3f70,
+0x0240, 0x0e54, 0x3f6c,
+0x0204, 0x0e90, 0x3f6c,
+0x01c8, 0x0ecc, 0x3f6c,
+0x0190, 0x0f00, 0x3f70,
+0x015c, 0x0f30, 0x3f74,
+0x012c, 0x0f58, 0x3f7c,
+0x00fc, 0x0f80, 0x3f84,
+0x00d0, 0x0fa0, 0x3f90,
+0x00a8, 0x0fbc, 0x3f9c,
+0x0080, 0x0fd4, 0x3fac,
+0x005c, 0x0fe8, 0x3fbc,
+0x003c, 0x0ff4, 0x3fd0,
+0x001c, 0x0ffc, 0x3fe8,
+0x0000, 0x1000, 0x0000,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132] = {
+0x0410, 0x07e0, 0x0410, 0x0000,
+0x03f8, 0x07dc, 0x0428, 0x0004,
+0x03e0, 0x07d8, 0x043c, 0x000c,
+0x03c8, 0x07d4, 0x0450, 0x0014,
+0x03ac, 0x07d0, 0x046c, 0x0018,
+0x0394, 0x07cc, 0x0480, 0x0020,
+0x037c, 0x07c8, 0x0494, 0x0028,
+0x0368, 0x07c0, 0x04a8, 0x0030,
+0x0350, 0x07b8, 0x04c0, 0x0038,
+0x0338, 0x07b4, 0x04d4, 0x0040,
+0x0320, 0x07ac, 0x04e8, 0x004c,
+0x0308, 0x07a4, 0x0500, 0x0054,
+0x02f4, 0x079c, 0x0514, 0x005c,
+0x02dc, 0x0794, 0x0528, 0x0068,
+0x02c4, 0x0788, 0x0544, 0x0070,
+0x02b0, 0x0780, 0x0554, 0x007c,
+0x029c, 0x0774, 0x0568, 0x0088,
+0x0284, 0x076c, 0x057c, 0x0094,
+0x0270, 0x0760, 0x0594, 0x009c,
+0x025c, 0x0754, 0x05a8, 0x00a8,
+0x0248, 0x0748, 0x05b8, 0x00b8,
+0x0230, 0x073c, 0x05d0, 0x00c4,
+0x021c, 0x0730, 0x05e4, 0x00d0,
+0x020c, 0x0724, 0x05f4, 0x00dc,
+0x01f8, 0x0714, 0x0608, 0x00ec,
+0x01e4, 0x0708, 0x061c, 0x00f8,
+0x01d0, 0x06f8, 0x0630, 0x0108,
+0x01c0, 0x06e8, 0x0640, 0x0118,
+0x01ac, 0x06dc, 0x0654, 0x0124,
+0x0198, 0x06cc, 0x0668, 0x0134,
+0x0188, 0x06bc, 0x0678, 0x0144,
+0x0178, 0x06ac, 0x0688, 0x0154,
+0x0168, 0x0698, 0x0698, 0x0168,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132] = {
+0x03ec, 0x0824, 0x03f0, 0x0000,
+0x03d4, 0x0824, 0x0404, 0x0004,
+0x03b8, 0x0820, 0x0420, 0x0008,
+0x03a0, 0x081c, 0x0438, 0x000c,
+0x0388, 0x0818, 0x0450, 0x0010,
+0x036c, 0x0814, 0x0468, 0x0018,
+0x0354, 0x0810, 0x0480, 0x001c,
+0x033c, 0x080c, 0x0494, 0x0024,
+0x0324, 0x0804, 0x04b0, 0x0028,
+0x030c, 0x07fc, 0x04c8, 0x0030,
+0x02f4, 0x07f4, 0x04e0, 0x0038,
+0x02dc, 0x07ec, 0x04f8, 0x0040,
+0x02c4, 0x07e4, 0x0510, 0x0048,
+0x02b0, 0x07dc, 0x0524, 0x0050,
+0x0298, 0x07d0, 0x0540, 0x0058,
+0x0280, 0x07c8, 0x0558, 0x0060,
+0x026c, 0x07bc, 0x0570, 0x0068,
+0x0254, 0x07b0, 0x0588, 0x0074,
+0x0240, 0x07a4, 0x05a0, 0x007c,
+0x022c, 0x0798, 0x05b4, 0x0088,
+0x0214, 0x078c, 0x05cc, 0x0094,
+0x0200, 0x077c, 0x05e4, 0x00a0,
+0x01ec, 0x0770, 0x05f8, 0x00ac,
+0x01d8, 0x0760, 0x0610, 0x00b8,
+0x01c4, 0x0750, 0x0628, 0x00c4,
+0x01b4, 0x0744, 0x0638, 0x00d0,
+0x01a0, 0x0734, 0x064c, 0x00e0,
+0x018c, 0x0720, 0x0668, 0x00ec,
+0x017c, 0x0710, 0x0678, 0x00fc,
+0x016c, 0x0700, 0x068c, 0x0108,
+0x0158, 0x06ec, 0x06a4, 0x0118,
+0x0148, 0x06dc, 0x06b4, 0x0128,
+0x0138, 0x06c8, 0x06c8, 0x0138,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132] = {
+0x0394, 0x08d8, 0x0394, 0x0000,
+0x0378, 0x08d4, 0x03b4, 0x0000,
+0x035c, 0x08d4, 0x03d0, 0x0000,
+0x0340, 0x08d4, 0x03ec, 0x0000,
+0x0324, 0x08d0, 0x0408, 0x0004,
+0x0308, 0x08cc, 0x0428, 0x0004,
+0x02f0, 0x08c8, 0x0444, 0x0004,
+0x02d4, 0x08c0, 0x0464, 0x0008,
+0x02b8, 0x08bc, 0x0484, 0x0008,
+0x02a0, 0x08b4, 0x04a0, 0x000c,
+0x0288, 0x08ac, 0x04bc, 0x0010,
+0x026c, 0x08a4, 0x04dc, 0x0014,
+0x0254, 0x0898, 0x04fc, 0x0018,
+0x023c, 0x0890, 0x0518, 0x001c,
+0x0224, 0x0884, 0x0538, 0x0020,
+0x020c, 0x0878, 0x0554, 0x0028,
+0x01f8, 0x086c, 0x0570, 0x002c,
+0x01e0, 0x085c, 0x0590, 0x0034,
+0x01c8, 0x084c, 0x05b4, 0x0038,
+0x01b4, 0x0840, 0x05cc, 0x0040,
+0x01a0, 0x0830, 0x05e8, 0x0048,
+0x018c, 0x081c, 0x0608, 0x0050,
+0x0178, 0x080c, 0x0624, 0x0058,
+0x0164, 0x07f8, 0x0644, 0x0060,
+0x0150, 0x07e4, 0x0660, 0x006c,
+0x0140, 0x07d0, 0x067c, 0x0074,
+0x012c, 0x07bc, 0x0698, 0x0080,
+0x011c, 0x07a8, 0x06b0, 0x008c,
+0x010c, 0x0790, 0x06cc, 0x0098,
+0x00fc, 0x077c, 0x06e4, 0x00a4,
+0x00ec, 0x0764, 0x0700, 0x00b0,
+0x00dc, 0x074c, 0x0718, 0x00c0,
+0x00cc, 0x0734, 0x0734, 0x00cc,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132] = {
+0x0320, 0x09bc, 0x0324, 0x0000,
+0x0300, 0x09c0, 0x0344, 0x3ffc,
+0x02e0, 0x09c0, 0x0364, 0x3ffc,
+0x02c4, 0x09c0, 0x0384, 0x3ff8,
+0x02a4, 0x09bc, 0x03ac, 0x3ff4,
+0x0288, 0x09b8, 0x03cc, 0x3ff4,
+0x0268, 0x09b4, 0x03f4, 0x3ff0,
+0x024c, 0x09b0, 0x0414, 0x3ff0,
+0x0230, 0x09a8, 0x043c, 0x3fec,
+0x0214, 0x09a0, 0x0460, 0x3fec,
+0x01f8, 0x0994, 0x0488, 0x3fec,
+0x01e0, 0x098c, 0x04a8, 0x3fec,
+0x01c4, 0x0980, 0x04d0, 0x3fec,
+0x01ac, 0x0970, 0x04f8, 0x3fec,
+0x0194, 0x0964, 0x051c, 0x3fec,
+0x017c, 0x0954, 0x0544, 0x3fec,
+0x0164, 0x0944, 0x0568, 0x3ff0,
+0x0150, 0x0934, 0x058c, 0x3ff0,
+0x0138, 0x0920, 0x05b4, 0x3ff4,
+0x0124, 0x090c, 0x05d8, 0x3ff8,
+0x0110, 0x08f8, 0x05fc, 0x3ffc,
+0x00fc, 0x08e0, 0x0624, 0x0000,
+0x00e8, 0x08c8, 0x064c, 0x0004,
+0x00d8, 0x08b0, 0x0670, 0x0008,
+0x00c4, 0x0898, 0x0694, 0x0010,
+0x00b4, 0x087c, 0x06bc, 0x0014,
+0x00a4, 0x0860, 0x06e0, 0x001c,
+0x0094, 0x0844, 0x0704, 0x0024,
+0x0088, 0x0828, 0x0724, 0x002c,
+0x0078, 0x080c, 0x0748, 0x0034,
+0x006c, 0x07ec, 0x0768, 0x0040,
+0x0060, 0x07cc, 0x078c, 0x0048,
+0x0054, 0x07ac, 0x07ac, 0x0054,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132] = {
+0x028c, 0x0ae4, 0x0290, 0x0000,
+0x0268, 0x0ae8, 0x02b4, 0x3ffc,
+0x0248, 0x0ae8, 0x02d8, 0x3ff8,
+0x0224, 0x0ae8, 0x0304, 0x3ff0,
+0x0204, 0x0ae4, 0x032c, 0x3fec,
+0x01e4, 0x0ae0, 0x0354, 0x3fe8,
+0x01c4, 0x0adc, 0x037c, 0x3fe4,
+0x01a4, 0x0ad4, 0x03a8, 0x3fe0,
+0x0188, 0x0acc, 0x03d0, 0x3fdc,
+0x016c, 0x0ac0, 0x03fc, 0x3fd8,
+0x0150, 0x0ab4, 0x042c, 0x3fd0,
+0x0134, 0x0aa4, 0x045c, 0x3fcc,
+0x0118, 0x0a94, 0x048c, 0x3fc8,
+0x0100, 0x0a84, 0x04b4, 0x3fc8,
+0x00e8, 0x0a70, 0x04e4, 0x3fc4,
+0x00d0, 0x0a5c, 0x0514, 0x3fc0,
+0x00bc, 0x0a48, 0x0540, 0x3fbc,
+0x00a4, 0x0a30, 0x0570, 0x3fbc,
+0x0090, 0x0a14, 0x05a4, 0x3fb8,
+0x007c, 0x09fc, 0x05d0, 0x3fb8,
+0x006c, 0x09e0, 0x05fc, 0x3fb8,
+0x0058, 0x09c0, 0x0634, 0x3fb4,
+0x0048, 0x09a0, 0x0664, 0x3fb4,
+0x0038, 0x0980, 0x0690, 0x3fb8,
+0x002c, 0x0960, 0x06bc, 0x3fb8,
+0x001c, 0x093c, 0x06f0, 0x3fb8,
+0x0010, 0x0918, 0x071c, 0x3fbc,
+0x0004, 0x08f4, 0x074c, 0x3fbc,
+0x3ff8, 0x08cc, 0x077c, 0x3fc0,
+0x3ff0, 0x08a4, 0x07a8, 0x3fc4,
+0x3fe8, 0x087c, 0x07d0, 0x3fcc,
+0x3fe0, 0x0854, 0x07fc, 0x3fd0,
+0x3fd8, 0x0828, 0x0828, 0x3fd8,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132] = {
+0x01d4, 0x0c54, 0x01d8, 0x0000,
+0x01b0, 0x0c58, 0x01fc, 0x3ffc,
+0x0188, 0x0c58, 0x0228, 0x3ff8,
+0x0164, 0x0c54, 0x0258, 0x3ff0,
+0x0140, 0x0c50, 0x0284, 0x3fec,
+0x0120, 0x0c48, 0x02b4, 0x3fe4,
+0x0100, 0x0c40, 0x02e0, 0x3fe0,
+0x00e0, 0x0c34, 0x0314, 0x3fd8,
+0x00c0, 0x0c28, 0x0344, 0x3fd4,
+0x00a4, 0x0c18, 0x0378, 0x3fcc,
+0x0088, 0x0c04, 0x03ac, 0x3fc8,
+0x0070, 0x0bf0, 0x03e0, 0x3fc0,
+0x0054, 0x0bdc, 0x0418, 0x3fb8,
+0x0040, 0x0bc4, 0x0448, 0x3fb4,
+0x0028, 0x0ba8, 0x0484, 0x3fac,
+0x0014, 0x0b8c, 0x04bc, 0x3fa4,
+0x0000, 0x0b6c, 0x04f4, 0x3fa0,
+0x3fec, 0x0b4c, 0x0530, 0x3f98,
+0x3fdc, 0x0b28, 0x0568, 0x3f94,
+0x3fcc, 0x0b04, 0x05a4, 0x3f8c,
+0x3fc0, 0x0adc, 0x05dc, 0x3f88,
+0x3fb0, 0x0ab4, 0x0618, 0x3f84,
+0x3fa4, 0x0a88, 0x0658, 0x3f7c,
+0x3f9c, 0x0a5c, 0x0690, 0x3f78,
+0x3f90, 0x0a30, 0x06cc, 0x3f74,
+0x3f88, 0x0a00, 0x0708, 0x3f70,
+0x3f80, 0x09d0, 0x0740, 0x3f70,
+0x3f7c, 0x09a0, 0x0778, 0x3f6c,
+0x3f74, 0x096c, 0x07b8, 0x3f68,
+0x3f70, 0x0938, 0x07f0, 0x3f68,
+0x3f6c, 0x0904, 0x0828, 0x3f68,
+0x3f6c, 0x08cc, 0x0860, 0x3f68,
+0x3f68, 0x0898, 0x0898, 0x3f68,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132] = {
+0x00fc, 0x0e0c, 0x00f8, 0x0000,
+0x00d0, 0x0e0c, 0x0128, 0x3ffc,
+0x00ac, 0x0e0c, 0x0150, 0x3ff8,
+0x0084, 0x0e04, 0x0184, 0x3ff4,
+0x0064, 0x0dfc, 0x01b0, 0x3ff0,
+0x0040, 0x0df0, 0x01e4, 0x3fec,
+0x0020, 0x0de0, 0x0218, 0x3fe8,
+0x0004, 0x0dd0, 0x024c, 0x3fe0,
+0x3fe8, 0x0db8, 0x0284, 0x3fdc,
+0x3fcc, 0x0da0, 0x02c0, 0x3fd4,
+0x3fb4, 0x0d84, 0x02fc, 0x3fcc,
+0x3fa0, 0x0d68, 0x0334, 0x3fc4,
+0x3f88, 0x0d48, 0x0370, 0x3fc0,
+0x3f78, 0x0d24, 0x03ac, 0x3fb8,
+0x3f64, 0x0cfc, 0x03f0, 0x3fb0,
+0x3f54, 0x0cd4, 0x0434, 0x3fa4,
+0x3f48, 0x0ca8, 0x0474, 0x3f9c,
+0x3f3c, 0x0c78, 0x04b8, 0x3f94,
+0x3f30, 0x0c48, 0x04fc, 0x3f8c,
+0x3f28, 0x0c14, 0x0540, 0x3f84,
+0x3f20, 0x0be0, 0x0588, 0x3f78,
+0x3f18, 0x0ba8, 0x05d0, 0x3f70,
+0x3f14, 0x0b70, 0x0614, 0x3f68,
+0x3f10, 0x0b34, 0x065c, 0x3f60,
+0x3f0c, 0x0af8, 0x06a8, 0x3f54,
+0x3f0c, 0x0abc, 0x06ec, 0x3f4c,
+0x3f0c, 0x0a7c, 0x0734, 0x3f44,
+0x3f0c, 0x0a38, 0x0780, 0x3f3c,
+0x3f0c, 0x09f8, 0x07c8, 0x3f34,
+0x3f10, 0x09b4, 0x080c, 0x3f30,
+0x3f14, 0x0970, 0x0854, 0x3f28,
+0x3f18, 0x092c, 0x089c, 0x3f20,
+0x3f1c, 0x08e4, 0x08e4, 0x3f1c,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132] = {
+0x0000, 0x1000, 0x0000, 0x0000,
+0x3fd8, 0x0ffc, 0x002c, 0x0000,
+0x3fb4, 0x0ff8, 0x0054, 0x0000,
+0x3f90, 0x0fec, 0x0088, 0x3ffc,
+0x3f70, 0x0fdc, 0x00b8, 0x3ffc,
+0x3f54, 0x0fc8, 0x00ec, 0x3ff8,
+0x3f38, 0x0fb0, 0x0120, 0x3ff8,
+0x3f20, 0x0f94, 0x0158, 0x3ff4,
+0x3f0c, 0x0f70, 0x0194, 0x3ff0,
+0x3ef8, 0x0f4c, 0x01d4, 0x3fe8,
+0x3ee4, 0x0f24, 0x0214, 0x3fe4,
+0x3ed8, 0x0ef8, 0x0250, 0x3fe0,
+0x3ec8, 0x0ec8, 0x0298, 0x3fd8,
+0x3ec0, 0x0e94, 0x02dc, 0x3fd0,
+0x3eb4, 0x0e5c, 0x0328, 0x3fc8,
+0x3eac, 0x0e24, 0x0370, 0x3fc0,
+0x3ea8, 0x0de4, 0x03bc, 0x3fb8,
+0x3ea4, 0x0da4, 0x0408, 0x3fb0,
+0x3ea4, 0x0d64, 0x0454, 0x3fa4,
+0x3ea4, 0x0d20, 0x04a4, 0x3f98,
+0x3ea4, 0x0cd8, 0x04f4, 0x3f90,
+0x3ea4, 0x0c8c, 0x054c, 0x3f84,
+0x3ea8, 0x0c40, 0x05a0, 0x3f78,
+0x3eb0, 0x0bf4, 0x05f0, 0x3f6c,
+0x3eb4, 0x0ba4, 0x0648, 0x3f60,
+0x3ebc, 0x0b54, 0x069c, 0x3f54,
+0x3ec4, 0x0b00, 0x06f4, 0x3f48,
+0x3ecc, 0x0ab0, 0x0748, 0x3f3c,
+0x3ed4, 0x0a58, 0x07a4, 0x3f30,
+0x3ee0, 0x0a04, 0x07f8, 0x3f24,
+0x3ee8, 0x09b0, 0x0850, 0x3f18,
+0x3ef4, 0x0958, 0x08a8, 0x3f0c,
+0x3f00, 0x0900, 0x0900, 0x3f00,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198] = {
+0x012c, 0x0400, 0x05a4, 0x0404, 0x012c, 0x0000,
+0x0124, 0x03f4, 0x05a4, 0x040c, 0x0138, 0x0000,
+0x011c, 0x03e8, 0x05a4, 0x0418, 0x0140, 0x0000,
+0x0114, 0x03dc, 0x05a0, 0x0424, 0x0148, 0x0004,
+0x010c, 0x03d4, 0x05a0, 0x042c, 0x0150, 0x0004,
+0x0100, 0x03c8, 0x05a0, 0x0438, 0x015c, 0x0004,
+0x00f8, 0x03bc, 0x05a0, 0x0440, 0x0164, 0x0008,
+0x00f0, 0x03b0, 0x059c, 0x044c, 0x0170, 0x0008,
+0x00e8, 0x03a4, 0x059c, 0x0458, 0x0178, 0x0008,
+0x00e0, 0x0398, 0x0598, 0x0460, 0x0184, 0x000c,
+0x00d8, 0x038c, 0x0594, 0x0470, 0x018c, 0x000c,
+0x00d0, 0x0380, 0x0594, 0x0474, 0x0198, 0x0010,
+0x00cc, 0x0374, 0x0590, 0x0480, 0x01a0, 0x0010,
+0x00c4, 0x0368, 0x058c, 0x0488, 0x01ac, 0x0014,
+0x00bc, 0x035c, 0x058c, 0x0494, 0x01b4, 0x0014,
+0x00b4, 0x034c, 0x0588, 0x04a0, 0x01c0, 0x0018,
+0x00ac, 0x0340, 0x0584, 0x04a8, 0x01cc, 0x001c,
+0x00a8, 0x0334, 0x0580, 0x04b4, 0x01d4, 0x001c,
+0x00a0, 0x0328, 0x057c, 0x04bc, 0x01e0, 0x0020,
+0x0098, 0x031c, 0x0578, 0x04c4, 0x01ec, 0x0024,
+0x0094, 0x0310, 0x0574, 0x04cc, 0x01f8, 0x0024,
+0x008c, 0x0304, 0x0570, 0x04d8, 0x0200, 0x0028,
+0x0088, 0x02f8, 0x0568, 0x04e0, 0x020c, 0x002c,
+0x0080, 0x02ec, 0x0564, 0x04e8, 0x0218, 0x0030,
+0x007c, 0x02e0, 0x0560, 0x04ec, 0x0224, 0x0034,
+0x0078, 0x02d4, 0x0558, 0x04f8, 0x0230, 0x0034,
+0x0070, 0x02c8, 0x0554, 0x0500, 0x023c, 0x0038,
+0x006c, 0x02bc, 0x054c, 0x050c, 0x0244, 0x003c,
+0x0064, 0x02b0, 0x0548, 0x0514, 0x0250, 0x0040,
+0x0060, 0x02a4, 0x0540, 0x051c, 0x025c, 0x0044,
+0x005c, 0x0298, 0x053c, 0x0520, 0x0268, 0x0048,
+0x0058, 0x028c, 0x0534, 0x0524, 0x0274, 0x0050,
+0x0054, 0x0280, 0x052c, 0x052c, 0x0280, 0x0054,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198] = {
+0x00a0, 0x0418, 0x068c, 0x041c, 0x00a0, 0x0000,
+0x0098, 0x0408, 0x068c, 0x0428, 0x00ac, 0x0000,
+0x0090, 0x03f8, 0x068c, 0x043c, 0x00b4, 0x3ffc,
+0x0088, 0x03e8, 0x068c, 0x044c, 0x00bc, 0x3ffc,
+0x0084, 0x03d8, 0x068c, 0x0458, 0x00c4, 0x3ffc,
+0x007c, 0x03c8, 0x0688, 0x046c, 0x00d0, 0x3ff8,
+0x0074, 0x03b8, 0x0688, 0x047c, 0x00d8, 0x3ff8,
+0x006c, 0x03a8, 0x0684, 0x048c, 0x00e4, 0x3ff8,
+0x0064, 0x0398, 0x0684, 0x049c, 0x00ec, 0x3ff8,
+0x0060, 0x0388, 0x0680, 0x04a8, 0x00f8, 0x3ff8,
+0x0058, 0x0378, 0x0680, 0x04b8, 0x0104, 0x3ff4,
+0x0054, 0x0368, 0x067c, 0x04c8, 0x010c, 0x3ff4,
+0x004c, 0x0358, 0x0678, 0x04d8, 0x0118, 0x3ff4,
+0x0048, 0x0348, 0x0674, 0x04e4, 0x0124, 0x3ff4,
+0x0040, 0x0338, 0x0670, 0x04f4, 0x0130, 0x3ff4,
+0x003c, 0x0328, 0x0668, 0x0504, 0x013c, 0x3ff4,
+0x0038, 0x0318, 0x0664, 0x0510, 0x0148, 0x3ff4,
+0x0034, 0x0308, 0x065c, 0x0520, 0x0154, 0x3ff4,
+0x002c, 0x02f8, 0x0658, 0x0530, 0x0160, 0x3ff4,
+0x0028, 0x02e8, 0x0654, 0x053c, 0x016c, 0x3ff4,
+0x0024, 0x02d8, 0x064c, 0x054c, 0x0178, 0x3ff4,
+0x0020, 0x02c8, 0x0644, 0x055c, 0x0184, 0x3ff4,
+0x001c, 0x02b8, 0x0640, 0x0568, 0x0190, 0x3ff4,
+0x0018, 0x02a8, 0x0638, 0x0574, 0x01a0, 0x3ff4,
+0x0014, 0x0298, 0x0630, 0x0584, 0x01ac, 0x3ff4,
+0x0014, 0x0288, 0x0624, 0x0590, 0x01bc, 0x3ff4,
+0x0010, 0x0278, 0x061c, 0x059c, 0x01c8, 0x3ff8,
+0x000c, 0x0268, 0x0614, 0x05ac, 0x01d4, 0x3ff8,
+0x0008, 0x0258, 0x060c, 0x05b8, 0x01e4, 0x3ff8,
+0x0008, 0x024c, 0x0600, 0x05bc, 0x01f4, 0x3ffc,
+0x0004, 0x023c, 0x05f8, 0x05cc, 0x0200, 0x3ffc,
+0x0004, 0x022c, 0x05ec, 0x05d4, 0x0210, 0x0000,
+0x0000, 0x021c, 0x05e4, 0x05e4, 0x021c, 0x0000,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198] = {
+0x0000, 0x041c, 0x07cc, 0x0418, 0x0000, 0x0000,
+0x3ff8, 0x0404, 0x07cc, 0x0434, 0x0008, 0x3ffc,
+0x3ff4, 0x03ec, 0x07cc, 0x044c, 0x000c, 0x3ffc,
+0x3ff0, 0x03d8, 0x07cc, 0x0460, 0x0014, 0x3ff8,
+0x3fe8, 0x03c0, 0x07cc, 0x0478, 0x001c, 0x3ff8,
+0x3fe4, 0x03ac, 0x07c8, 0x0490, 0x0024, 0x3ff4,
+0x3fe0, 0x0394, 0x07c8, 0x04a4, 0x002c, 0x3ff4,
+0x3fdc, 0x0380, 0x07c4, 0x04bc, 0x0034, 0x3ff0,
+0x3fd8, 0x0368, 0x07c0, 0x04d4, 0x0040, 0x3fec,
+0x3fd4, 0x0350, 0x07bc, 0x04ec, 0x0048, 0x3fec,
+0x3fd0, 0x033c, 0x07b8, 0x0504, 0x0050, 0x3fe8,
+0x3fcc, 0x0324, 0x07b4, 0x051c, 0x005c, 0x3fe4,
+0x3fc8, 0x0310, 0x07ac, 0x0530, 0x0068, 0x3fe4,
+0x3fc4, 0x02fc, 0x07a8, 0x0548, 0x0070, 0x3fe0,
+0x3fc4, 0x02e4, 0x07a0, 0x055c, 0x007c, 0x3fe0,
+0x3fc0, 0x02d0, 0x0798, 0x0574, 0x0088, 0x3fdc,
+0x3fc0, 0x02b8, 0x0790, 0x058c, 0x0094, 0x3fd8,
+0x3fbc, 0x02a4, 0x0788, 0x05a0, 0x00a0, 0x3fd8,
+0x3fbc, 0x0290, 0x077c, 0x05b8, 0x00ac, 0x3fd4,
+0x3fbc, 0x027c, 0x0774, 0x05c8, 0x00b8, 0x3fd4,
+0x3fb8, 0x0268, 0x0768, 0x05e0, 0x00c8, 0x3fd0,
+0x3fb8, 0x0250, 0x0760, 0x05f8, 0x00d4, 0x3fcc,
+0x3fb8, 0x023c, 0x0754, 0x0608, 0x00e4, 0x3fcc,
+0x3fb8, 0x0228, 0x0748, 0x0620, 0x00f0, 0x3fc8,
+0x3fb8, 0x0214, 0x073c, 0x0630, 0x0100, 0x3fc8,
+0x3fb8, 0x0204, 0x072c, 0x0644, 0x0110, 0x3fc4,
+0x3fb8, 0x01f0, 0x0720, 0x0658, 0x011c, 0x3fc4,
+0x3fb8, 0x01dc, 0x0710, 0x0670, 0x012c, 0x3fc0,
+0x3fb8, 0x01c8, 0x0704, 0x0680, 0x013c, 0x3fc0,
+0x3fb8, 0x01b8, 0x06f4, 0x0690, 0x014c, 0x3fc0,
+0x3fb8, 0x01a4, 0x06e4, 0x06a4, 0x0160, 0x3fbc,
+0x3fb8, 0x0194, 0x06d4, 0x06b4, 0x0170, 0x3fbc,
+0x3fbc, 0x0180, 0x06c4, 0x06c4, 0x0180, 0x3fbc,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198] = {
+0x3f64, 0x03ec, 0x0960, 0x03ec, 0x3f64, 0x0000,
+0x3f64, 0x03cc, 0x0960, 0x0408, 0x3f68, 0x0000,
+0x3f60, 0x03ac, 0x0960, 0x042c, 0x3f6c, 0x3ffc,
+0x3f60, 0x038c, 0x0960, 0x0448, 0x3f70, 0x3ffc,
+0x3f60, 0x0370, 0x095c, 0x046c, 0x3f70, 0x3ff8,
+0x3f5c, 0x0350, 0x0958, 0x048c, 0x3f78, 0x3ff8,
+0x3f5c, 0x0334, 0x0954, 0x04ac, 0x3f7c, 0x3ff4,
+0x3f5c, 0x0314, 0x0950, 0x04cc, 0x3f80, 0x3ff4,
+0x3f5c, 0x02f8, 0x0948, 0x04f0, 0x3f84, 0x3ff0,
+0x3f5c, 0x02d8, 0x0944, 0x050c, 0x3f8c, 0x3ff0,
+0x3f60, 0x02bc, 0x093c, 0x052c, 0x3f90, 0x3fec,
+0x3f60, 0x02a0, 0x0930, 0x0550, 0x3f98, 0x3fe8,
+0x3f60, 0x0284, 0x0928, 0x056c, 0x3fa0, 0x3fe8,
+0x3f64, 0x0268, 0x091c, 0x058c, 0x3fa8, 0x3fe4,
+0x3f64, 0x024c, 0x0910, 0x05b0, 0x3fb0, 0x3fe0,
+0x3f64, 0x0230, 0x0904, 0x05d0, 0x3fbc, 0x3fdc,
+0x3f68, 0x0214, 0x08f8, 0x05ec, 0x3fc4, 0x3fdc,
+0x3f6c, 0x01fc, 0x08e8, 0x060c, 0x3fcc, 0x3fd8,
+0x3f6c, 0x01e0, 0x08dc, 0x062c, 0x3fd8, 0x3fd4,
+0x3f70, 0x01c8, 0x08cc, 0x0648, 0x3fe4, 0x3fd0,
+0x3f74, 0x01b0, 0x08bc, 0x0664, 0x3ff0, 0x3fcc,
+0x3f74, 0x0194, 0x08a8, 0x068c, 0x3ffc, 0x3fc8,
+0x3f78, 0x017c, 0x0898, 0x06a8, 0x0008, 0x3fc4,
+0x3f7c, 0x0168, 0x0884, 0x06c0, 0x0018, 0x3fc0,
+0x3f80, 0x0150, 0x0870, 0x06dc, 0x0024, 0x3fc0,
+0x3f84, 0x0138, 0x085c, 0x06f8, 0x0034, 0x3fbc,
+0x3f88, 0x0120, 0x0848, 0x0718, 0x0040, 0x3fb8,
+0x3f8c, 0x010c, 0x0830, 0x0734, 0x0050, 0x3fb4,
+0x3f90, 0x00f8, 0x081c, 0x074c, 0x0060, 0x3fb0,
+0x3f94, 0x00e4, 0x0800, 0x0768, 0x0074, 0x3fac,
+0x3f98, 0x00d0, 0x07e8, 0x0784, 0x0084, 0x3fa8,
+0x3f9c, 0x00bc, 0x07d4, 0x079c, 0x0094, 0x3fa4,
+0x3fa0, 0x00a8, 0x07b8, 0x07b8, 0x00a8, 0x3fa0,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198] = {
+0x3f00, 0x0368, 0x0b30, 0x0368, 0x3f00, 0x0000,
+0x3f04, 0x0340, 0x0b30, 0x0390, 0x3efc, 0x0000,
+0x3f08, 0x0318, 0x0b2c, 0x03bc, 0x3ef8, 0x0000,
+0x3f0c, 0x02f0, 0x0b28, 0x03e4, 0x3ef8, 0x0000,
+0x3f10, 0x02c8, 0x0b24, 0x0410, 0x3ef4, 0x0000,
+0x3f14, 0x02a0, 0x0b1c, 0x043c, 0x3ef4, 0x0000,
+0x3f1c, 0x027c, 0x0b14, 0x0464, 0x3ef0, 0x0000,
+0x3f20, 0x0254, 0x0b0c, 0x0490, 0x3ef0, 0x0000,
+0x3f24, 0x0230, 0x0b00, 0x04bc, 0x3ef0, 0x0000,
+0x3f2c, 0x020c, 0x0af4, 0x04e4, 0x3ef0, 0x0000,
+0x3f30, 0x01e8, 0x0ae8, 0x0510, 0x3ef0, 0x0000,
+0x3f38, 0x01c8, 0x0ad8, 0x0534, 0x3ef4, 0x0000,
+0x3f40, 0x01a4, 0x0ac8, 0x0564, 0x3ef4, 0x3ffc,
+0x3f44, 0x0184, 0x0ab4, 0x0590, 0x3ef8, 0x3ffc,
+0x3f4c, 0x0164, 0x0aa4, 0x05b8, 0x3efc, 0x3ff8,
+0x3f50, 0x0144, 0x0a90, 0x05e8, 0x3efc, 0x3ff8,
+0x3f58, 0x0124, 0x0a78, 0x0610, 0x3f04, 0x3ff8,
+0x3f60, 0x0108, 0x0a64, 0x0638, 0x3f08, 0x3ff4,
+0x3f64, 0x00e8, 0x0a4c, 0x066c, 0x3f0c, 0x3ff0,
+0x3f6c, 0x00cc, 0x0a34, 0x0690, 0x3f14, 0x3ff0,
+0x3f70, 0x00b4, 0x0a18, 0x06bc, 0x3f1c, 0x3fec,
+0x3f78, 0x0098, 0x0a00, 0x06e8, 0x3f20, 0x3fe8,
+0x3f80, 0x007c, 0x09e4, 0x0710, 0x3f2c, 0x3fe4,
+0x3f84, 0x0064, 0x09c8, 0x0738, 0x3f34, 0x3fe4,
+0x3f8c, 0x004c, 0x09a8, 0x0764, 0x3f3c, 0x3fe0,
+0x3f90, 0x0034, 0x098c, 0x078c, 0x3f48, 0x3fdc,
+0x3f98, 0x0020, 0x096c, 0x07b0, 0x3f54, 0x3fd8,
+0x3f9c, 0x0008, 0x094c, 0x07dc, 0x3f60, 0x3fd4,
+0x3fa4, 0x3ff4, 0x0928, 0x0808, 0x3f6c, 0x3fcc,
+0x3fa8, 0x3fe0, 0x0908, 0x082c, 0x3f7c, 0x3fc8,
+0x3fb0, 0x3fcc, 0x08e4, 0x0854, 0x3f88, 0x3fc4,
+0x3fb4, 0x3fbc, 0x08c0, 0x0878, 0x3f98, 0x3fc0,
+0x3fbc, 0x3fac, 0x0898, 0x0898, 0x3fac, 0x3fbc,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198] = {
+0x3efc, 0x0284, 0x0d00, 0x0284, 0x3efc, 0x0000,
+0x3f04, 0x0254, 0x0d00, 0x02b4, 0x3ef0, 0x0004,
+0x3f10, 0x0224, 0x0cf8, 0x02e8, 0x3ee8, 0x0004,
+0x3f18, 0x01f4, 0x0cf4, 0x0318, 0x3ee0, 0x0008,
+0x3f24, 0x01c8, 0x0ce8, 0x034c, 0x3ed8, 0x0008,
+0x3f30, 0x019c, 0x0ce0, 0x037c, 0x3ecc, 0x000c,
+0x3f38, 0x0170, 0x0cd0, 0x03b8, 0x3ec4, 0x000c,
+0x3f44, 0x0144, 0x0cc4, 0x03e8, 0x3ebc, 0x0010,
+0x3f4c, 0x011c, 0x0cb4, 0x0420, 0x3eb4, 0x0010,
+0x3f58, 0x00f4, 0x0ca0, 0x0458, 0x3eac, 0x0010,
+0x3f60, 0x00cc, 0x0c8c, 0x048c, 0x3ea8, 0x0014,
+0x3f6c, 0x00a8, 0x0c74, 0x04c4, 0x3ea0, 0x0014,
+0x3f74, 0x0084, 0x0c5c, 0x04fc, 0x3e9c, 0x0014,
+0x3f7c, 0x0060, 0x0c44, 0x0534, 0x3e94, 0x0018,
+0x3f88, 0x0040, 0x0c28, 0x0568, 0x3e90, 0x0018,
+0x3f90, 0x0020, 0x0c08, 0x05a4, 0x3e8c, 0x0018,
+0x3f98, 0x0000, 0x0bec, 0x05dc, 0x3e88, 0x0018,
+0x3fa0, 0x3fe4, 0x0bcc, 0x0614, 0x3e84, 0x0018,
+0x3fac, 0x3fc4, 0x0ba8, 0x064c, 0x3e84, 0x0018,
+0x3fb4, 0x3fac, 0x0b84, 0x0684, 0x3e80, 0x0018,
+0x3fb8, 0x3f90, 0x0b60, 0x06c0, 0x3e80, 0x0018,
+0x3fc0, 0x3f78, 0x0b38, 0x06f8, 0x3e80, 0x0018,
+0x3fc8, 0x3f60, 0x0b14, 0x072c, 0x3e80, 0x0018,
+0x3fd0, 0x3f4c, 0x0ae8, 0x0760, 0x3e84, 0x0018,
+0x3fd8, 0x3f34, 0x0ac0, 0x079c, 0x3e84, 0x0014,
+0x3fdc, 0x3f20, 0x0a94, 0x07d4, 0x3e88, 0x0014,
+0x3fe4, 0x3f10, 0x0a68, 0x0808, 0x3e8c, 0x0010,
+0x3fe8, 0x3f00, 0x0a38, 0x0840, 0x3e90, 0x0010,
+0x3fec, 0x3ef0, 0x0a0c, 0x0874, 0x3e98, 0x000c,
+0x3ff4, 0x3ee0, 0x09d8, 0x08a8, 0x3ea0, 0x000c,
+0x3ff8, 0x3ed0, 0x09ac, 0x08dc, 0x3ea8, 0x0008,
+0x3ffc, 0x3ec4, 0x0978, 0x0914, 0x3eb0, 0x0004,
+0x0000, 0x3eb8, 0x0948, 0x0948, 0x3eb8, 0x0000,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198] = {
+0x3f60, 0x0154, 0x0e9c, 0x0150, 0x3f60, 0x0000,
+0x3f6c, 0x011c, 0x0e9c, 0x018c, 0x3f50, 0x0000,
+0x3f7c, 0x00ec, 0x0e94, 0x01bc, 0x3f44, 0x0004,
+0x3f88, 0x00b8, 0x0e8c, 0x01f8, 0x3f34, 0x0008,
+0x3f94, 0x0088, 0x0e80, 0x0234, 0x3f28, 0x0008,
+0x3fa0, 0x005c, 0x0e74, 0x026c, 0x3f18, 0x000c,
+0x3fac, 0x0030, 0x0e60, 0x02b0, 0x3f08, 0x000c,
+0x3fb8, 0x0004, 0x0e50, 0x02e8, 0x3efc, 0x0010,
+0x3fc4, 0x3fdc, 0x0e38, 0x0328, 0x3eec, 0x0014,
+0x3fd0, 0x3fb4, 0x0e20, 0x0368, 0x3ee0, 0x0014,
+0x3fd8, 0x3f90, 0x0e04, 0x03ac, 0x3ed0, 0x0018,
+0x3fe4, 0x3f6c, 0x0de8, 0x03e8, 0x3ec4, 0x001c,
+0x3fec, 0x3f4c, 0x0dc8, 0x042c, 0x3eb4, 0x0020,
+0x3ff4, 0x3f2c, 0x0da4, 0x0474, 0x3ea8, 0x0020,
+0x0000, 0x3f0c, 0x0d80, 0x04b8, 0x3e98, 0x0024,
+0x0008, 0x3ef0, 0x0d58, 0x04fc, 0x3e8c, 0x0028,
+0x000c, 0x3ed8, 0x0d30, 0x0540, 0x3e80, 0x002c,
+0x0014, 0x3ec0, 0x0d04, 0x0588, 0x3e74, 0x002c,
+0x001c, 0x3ea8, 0x0cd8, 0x05cc, 0x3e68, 0x0030,
+0x0020, 0x3e94, 0x0ca8, 0x0614, 0x3e5c, 0x0034,
+0x0028, 0x3e80, 0x0c78, 0x065c, 0x3e50, 0x0034,
+0x002c, 0x3e6c, 0x0c44, 0x06a4, 0x3e48, 0x0038,
+0x0030, 0x3e5c, 0x0c0c, 0x06f0, 0x3e3c, 0x003c,
+0x0034, 0x3e50, 0x0bd8, 0x0734, 0x3e34, 0x003c,
+0x0038, 0x3e44, 0x0ba0, 0x0778, 0x3e2c, 0x0040,
+0x003c, 0x3e38, 0x0b64, 0x07c4, 0x3e24, 0x0040,
+0x0040, 0x3e2c, 0x0b28, 0x0808, 0x3e20, 0x0044,
+0x0040, 0x3e24, 0x0aec, 0x0850, 0x3e1c, 0x0044,
+0x0044, 0x3e1c, 0x0aac, 0x0898, 0x3e18, 0x0044,
+0x0044, 0x3e18, 0x0a70, 0x08d8, 0x3e14, 0x0048,
+0x0044, 0x3e14, 0x0a2c, 0x0924, 0x3e10, 0x0048,
+0x0048, 0x3e10, 0x09ec, 0x0964, 0x3e10, 0x0048,
+0x0048, 0x3e10, 0x09a8, 0x09a8, 0x3e10, 0x0048,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198] = {
+0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+0x000c, 0x3fcc, 0x1000, 0x0034, 0x3ff4, 0x0000,
+0x0018, 0x3f9c, 0x0ff8, 0x0070, 0x3fe4, 0x0000,
+0x0024, 0x3f6c, 0x0ff0, 0x00ac, 0x3fd4, 0x0000,
+0x0030, 0x3f40, 0x0fe4, 0x00e8, 0x3fc4, 0x0000,
+0x0038, 0x3f14, 0x0fd4, 0x0128, 0x3fb4, 0x0004,
+0x0044, 0x3eec, 0x0fc0, 0x0168, 0x3fa4, 0x0004,
+0x004c, 0x3ec8, 0x0fac, 0x01a8, 0x3f94, 0x0004,
+0x0054, 0x3ea4, 0x0f90, 0x01ec, 0x3f84, 0x0008,
+0x005c, 0x3e84, 0x0f74, 0x0234, 0x3f70, 0x0008,
+0x0060, 0x3e64, 0x0f50, 0x0280, 0x3f60, 0x000c,
+0x0068, 0x3e48, 0x0f2c, 0x02c8, 0x3f4c, 0x0010,
+0x006c, 0x3e30, 0x0f04, 0x0318, 0x3f38, 0x0010,
+0x0070, 0x3e18, 0x0edc, 0x0364, 0x3f24, 0x0014,
+0x0074, 0x3e00, 0x0eac, 0x03b8, 0x3f10, 0x0018,
+0x0078, 0x3df0, 0x0e7c, 0x0404, 0x3efc, 0x001c,
+0x007c, 0x3de0, 0x0e48, 0x0454, 0x3ee8, 0x0020,
+0x007c, 0x3dd0, 0x0e14, 0x04ac, 0x3ed4, 0x0020,
+0x0080, 0x3dc4, 0x0dd8, 0x0500, 0x3ec0, 0x0024,
+0x0080, 0x3db8, 0x0d9c, 0x0554, 0x3eac, 0x002c,
+0x0080, 0x3db0, 0x0d5c, 0x05ac, 0x3e98, 0x0030,
+0x0080, 0x3da8, 0x0d1c, 0x0600, 0x3e88, 0x0034,
+0x0080, 0x3da4, 0x0cd8, 0x0658, 0x3e74, 0x0038,
+0x0080, 0x3da4, 0x0c94, 0x06ac, 0x3e60, 0x003c,
+0x007c, 0x3da0, 0x0c4c, 0x070c, 0x3e4c, 0x0040,
+0x007c, 0x3da4, 0x0c00, 0x0760, 0x3e3c, 0x0044,
+0x0078, 0x3da4, 0x0bb4, 0x07bc, 0x3e2c, 0x0048,
+0x0074, 0x3da8, 0x0b64, 0x0814, 0x3e1c, 0x0050,
+0x0074, 0x3db0, 0x0b14, 0x0868, 0x3e0c, 0x0054,
+0x0070, 0x3db8, 0x0ac4, 0x08c0, 0x3dfc, 0x0058,
+0x006c, 0x3dc0, 0x0a70, 0x091c, 0x3dec, 0x005c,
+0x0068, 0x3dc8, 0x0a1c, 0x0974, 0x3de0, 0x0060,
+0x0064, 0x3dd4, 0x09c8, 0x09c8, 0x3dd4, 0x0064,
+};
+
+static struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1173,7 +2012,7 @@ struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
{-1, -1, 0x0002},
};
-struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1185,7 +2024,7 @@ struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
{-1, -1, 0x0002},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
{3, 10, 0x4100},
{4, 10, 0x4100},
{5, 10, 0x4100},
@@ -1197,7 +2036,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
{-1, -1, 0x4100},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
{3, 10, 0x4000},
{4, 10, 0x4000},
{5, 10, 0x4000},
@@ -1209,7 +2048,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
{-1, -1, 0x4000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x251F},
{5, 10, 0x291F},
@@ -1221,7 +2060,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
{-1, -1, 0xA640},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x9600},
{5, 10, 0xA460},
@@ -1233,7 +2072,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
{-1, -1, 0xB058},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
{3, 10, 0x4100},
{4, 10, 0x4100},
{5, 10, 0x4100},
@@ -1245,7 +2084,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
{-1, -1, 0x4100},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
{3, 10, 0x4000},
{4, 10, 0x4000},
{5, 10, 0x4000},
@@ -1257,7 +2096,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
{-1, -1, 0x4000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1269,7 +2108,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
{-1, -1, 0x0000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1281,7 +2120,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
{-1, -1, 0xAC00},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1293,7 +2132,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] =
{-1, -1, 0xA8D8},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1305,7 +2144,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
{-1, -1, 0x3ADB},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
{3, 10, 0x3800},
{4, 10, 0x3800},
{5, 10, 0x3800},
@@ -1317,7 +2156,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
{-1, -1, 0x3B66},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
{3, 10, 0x3800},
{4, 10, 0x3800},
{5, 10, 0x3800},
@@ -1329,7 +2168,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
{-1, -1, 0x2F20},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1341,7 +2180,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
{-1, -1, 0x1F00},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1353,61 +2192,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
{-1, -1, 0x9E00},
};
-void spl_init_easf_filter_coeffs(void)
-{
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30,
- easf_filter_3tap_64p_ratio_0_30_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40,
- easf_filter_3tap_64p_ratio_0_40_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50,
- easf_filter_3tap_64p_ratio_0_50_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60,
- easf_filter_3tap_64p_ratio_0_60_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70,
- easf_filter_3tap_64p_ratio_0_70_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80,
- easf_filter_3tap_64p_ratio_0_80_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90,
- easf_filter_3tap_64p_ratio_0_90_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00,
- easf_filter_3tap_64p_ratio_1_00_s1_12, 3);
-
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30,
- easf_filter_4tap_64p_ratio_0_30_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40,
- easf_filter_4tap_64p_ratio_0_40_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50,
- easf_filter_4tap_64p_ratio_0_50_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60,
- easf_filter_4tap_64p_ratio_0_60_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70,
- easf_filter_4tap_64p_ratio_0_70_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80,
- easf_filter_4tap_64p_ratio_0_80_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90,
- easf_filter_4tap_64p_ratio_0_90_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00,
- easf_filter_4tap_64p_ratio_1_00_s1_12, 4);
-
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30,
- easf_filter_6tap_64p_ratio_0_30_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40,
- easf_filter_6tap_64p_ratio_0_40_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50,
- easf_filter_6tap_64p_ratio_0_50_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60,
- easf_filter_6tap_64p_ratio_0_60_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70,
- easf_filter_6tap_64p_ratio_0_70_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80,
- easf_filter_6tap_64p_ratio_0_80_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90,
- easf_filter_6tap_64p_ratio_0_90_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00,
- easf_filter_6tap_64p_ratio_1_00_s1_12, 6);
-}
-
-uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_3tap_64p_ratio_0_30_s1_12;
@@ -1427,7 +2212,7 @@ uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_3tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_4tap_64p_ratio_0_30_s1_12;
@@ -1447,7 +2232,7 @@ uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_4tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_6tap_64p_ratio_0_30_s1_12;
@@ -1467,7 +2252,7 @@ uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_6tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
{
if (taps == 6)
return spl_get_easf_filter_6tap_64p(ratio);
@@ -1482,6 +2267,81 @@ uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ra
}
}
+static const uint16_t *spl_get_easf_filter_3tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_3tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_3tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_3tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_3tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_3tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_3tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_3tap_64p_ratio_0_90;
+ else
+ return easf_filter_3tap_64p_ratio_1_00;
+}
+
+static const uint16_t *spl_get_easf_filter_4tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_4tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_4tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_4tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_4tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_4tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_4tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_4tap_64p_ratio_0_90;
+ else
+ return easf_filter_4tap_64p_ratio_1_00;
+}
+
+static const uint16_t *spl_get_easf_filter_6tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_6tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_6tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_6tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_6tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_6tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_6tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_6tap_64p_ratio_0_90;
+ else
+ return easf_filter_6tap_64p_ratio_1_00;
+}
+
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p_s1_10(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 6)
+ return spl_get_easf_filter_6tap_64p_s1_10(ratio);
+ else if (taps == 4)
+ return spl_get_easf_filter_4tap_64p_s1_10(ratio);
+ else if (taps == 3)
+ return spl_get_easf_filter_3tap_64p_s1_10(ratio);
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data, bool enable_easf_v,
bool enable_easf_h)
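The two lookup helpers above (spl_dscl_get_easf_filter_coeffs_64p() and the new spl_dscl_get_easf_filter_coeffs_64p_s1_10()) pick a 64-phase coefficient table by comparing the scale ratio against fixed-point thresholds from 0.3 up to 1.0. A minimal caller sketch, illustrative only and not part of this patch (the tap count and ratio are assumed to come from the surrounding scaler code):

	/* illustrative sketch - callers outside this patch are assumed */
	struct spl_fixed31_32 ratio = spl_fixpt_from_fraction(3, 4); /* 0.75 downscale */
	const uint16_t *c12 = spl_dscl_get_easf_filter_coeffs_64p(4, ratio);       /* s1.12 output */
	const uint16_t *c10 = spl_dscl_get_easf_filter_coeffs_64p_s1_10(4, ratio); /* raw s1.10 table */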
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h
index 8bb2b8108e38..321ae22a04d4 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h
@@ -13,11 +13,6 @@ struct scale_ratio_to_reg_value_lookup {
const uint32_t reg_value;
};
-void spl_init_easf_filter_coeffs(void);
-uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data, bool enable_easf_v,
bool enable_easf_h);
@@ -35,4 +30,8 @@ uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
+/* public API */
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p_s1_10(int taps, struct spl_fixed31_32 ratio);
+
#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c
index b02c7b0b262b..5e52bdf1ad44 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c
@@ -4,194 +4,6 @@
#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
-//=========================================
-// <num_taps> = 2
-// <num_phases> = 16
-// <scale_ratio> = 0.833333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = s1.10
-// <CoefOut> = s1.12
-//=========================================
-static const uint16_t filter_2tap_16p[18] = {
- 0x1000, 0x0000,
- 0x0FF0, 0x0010,
- 0x0FB0, 0x0050,
- 0x0F34, 0x00CC,
- 0x0E68, 0x0198,
- 0x0D44, 0x02BC,
- 0x0BC4, 0x043C,
- 0x09FC, 0x0604,
- 0x0800, 0x0800
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 0.83333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_upscale[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x06AC, 0x0978, 0x3FDC,
- 0x055C, 0x0AF0, 0x3FB4,
- 0x0420, 0x0C50, 0x3F90,
- 0x0300, 0x0D88, 0x3F78,
- 0x0200, 0x0E90, 0x3F70,
- 0x0128, 0x0F5C, 0x3F7C,
- 0x007C, 0x0FD8, 0x3FAC,
- 0x0000, 0x1000, 0x0000
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.16666 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_116[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0700, 0x0914, 0x3FEC,
- 0x0604, 0x0A1C, 0x3FE0,
- 0x050C, 0x0B14, 0x3FE0,
- 0x041C, 0x0BF4, 0x3FF0,
- 0x0340, 0x0CB0, 0x0010,
- 0x0274, 0x0D3C, 0x0050,
- 0x01C0, 0x0D94, 0x00AC,
- 0x0128, 0x0DB4, 0x0124
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.49999 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_149[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0730, 0x08CC, 0x0004,
- 0x0660, 0x098C, 0x0014,
- 0x0590, 0x0A3C, 0x0034,
- 0x04C4, 0x0AD4, 0x0068,
- 0x0400, 0x0B54, 0x00AC,
- 0x0348, 0x0BB0, 0x0108,
- 0x029C, 0x0BEC, 0x0178,
- 0x0200, 0x0C00, 0x0200
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.83332 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_183[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0754, 0x0880, 0x002C,
- 0x06A8, 0x08F0, 0x0068,
- 0x05FC, 0x0954, 0x00B0,
- 0x0550, 0x09AC, 0x0104,
- 0x04A8, 0x09F0, 0x0168,
- 0x0408, 0x0A20, 0x01D8,
- 0x036C, 0x0A40, 0x0254,
- 0x02DC, 0x0A48, 0x02DC
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 0.83333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_upscale[36] = {
- 0x0000, 0x1000, 0x0000, 0x0000,
- 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
- 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
- 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
- 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
- 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
- 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
- 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
- 0x3F00, 0x08FC, 0x0900, 0x3F04
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.16666 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_116[36] = {
- 0x01A8, 0x0CB4, 0x01A4, 0x0000,
- 0x0110, 0x0CB0, 0x0254, 0x3FEC,
- 0x0090, 0x0C80, 0x031C, 0x3FD4,
- 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
- 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
- 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
- 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
- 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
- 0x3F54, 0x08AC, 0x08AC, 0x3F54
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.49999 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_149[36] = {
- 0x02B8, 0x0A90, 0x02B8, 0x0000,
- 0x0230, 0x0A90, 0x0350, 0x3FF0,
- 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
- 0x0148, 0x0A48, 0x049C, 0x3FD4,
- 0x00E8, 0x0A00, 0x054C, 0x3FCC,
- 0x0098, 0x09A0, 0x0600, 0x3FC8,
- 0x0054, 0x0928, 0x06B4, 0x3FD0,
- 0x001C, 0x08A4, 0x0760, 0x3FE0,
- 0x3FFC, 0x0804, 0x0804, 0x3FFC
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.83332 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_183[36] = {
- 0x03B0, 0x08A0, 0x03B0, 0x0000,
- 0x0348, 0x0898, 0x041C, 0x0004,
- 0x02DC, 0x0884, 0x0490, 0x0010,
- 0x0278, 0x0864, 0x0500, 0x0024,
- 0x021C, 0x0838, 0x0570, 0x003C,
- 0x01C8, 0x07FC, 0x05E0, 0x005C,
- 0x0178, 0x07B8, 0x064C, 0x0084,
- 0x0130, 0x076C, 0x06B0, 0x00B4,
- 0x00F0, 0x0714, 0x0710, 0x00EC
-};
//=========================================
// <num_taps> = 2
@@ -1318,19 +1130,7 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
-const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
-{
- if (ratio.value < spl_fixpt_one.value)
- return filter_3tap_16p_upscale;
- else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_116;
- else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_149;
- else
- return filter_3tap_16p_183;
-}
-
-const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_3tap_64p_upscale;
@@ -1342,19 +1142,7 @@ const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
return filter_3tap_64p_183;
}
-const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
-{
- if (ratio.value < spl_fixpt_one.value)
- return filter_4tap_16p_upscale;
- else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_116;
- else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_149;
- else
- return filter_4tap_16p_183;
-}
-
-const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_4tap_64p_upscale;
@@ -1366,7 +1154,7 @@ const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
return filter_4tap_64p_183;
}
-const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_5tap_64p_upscale;
@@ -1378,7 +1166,7 @@ const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
return filter_5tap_64p_183;
}
-const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_6tap_64p_upscale;
@@ -1390,7 +1178,7 @@ const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
return filter_6tap_64p_183;
}
-const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_7tap_64p_upscale;
@@ -1402,7 +1190,7 @@ const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
return filter_7tap_64p_183;
}
-const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_8tap_64p_upscale;
@@ -1414,12 +1202,7 @@ const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
return filter_8tap_64p_183;
}
-const uint16_t *spl_get_filter_2tap_16p(void)
-{
- return filter_2tap_16p;
-}
-
-const uint16_t *spl_get_filter_2tap_64p(void)
+static const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
@@ -1448,4 +1231,3 @@ const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 r
return NULL;
}
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h
new file mode 100644
index 000000000000..c315a438d064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DC_SPL_SCL_FILTERS_H__
+#define __DC_SPL_SCL_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+/* public API */
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+
+#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 467af9dd90de..20e4e52a77ac 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -427,6 +427,14 @@ struct spl_out {
// SPL inputs
+// opp extra adjustment for rect
+struct spl_opp_adjust {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
// Basic input information
struct basic_in {
enum spl_pixel_format format; // Pixel Format
@@ -444,6 +452,7 @@ struct basic_in {
} num_slices_recout_width;
} num_h_slices_recout_width_align;
int mpc_h_slice_index; // previous mpc_combine_v - split_idx
+ struct spl_opp_adjust opp_recout_adjust;
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
@@ -451,6 +460,8 @@ struct basic_in {
enum spl_color_space color_space; // Color Space
unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr
bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this?
+ int custom_width; // Width for non-standard segmentation - used when != 0
+ int custom_x; // Start x for non-standard segmentation - used when custom_width != 0
};
// Basic output information
@@ -471,6 +482,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
+enum sharpness_range_source {
+ SHARPNESS_RANGE_DCN = 0,
+ SHARPNESS_RANGE_DCN_OVERRIDE
+};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;
@@ -484,7 +499,7 @@ struct spl_sharpness_range {
};
struct adaptive_sharpness {
bool enable;
- int sharpness_level;
+ unsigned int sharpness_level;
struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
@@ -530,11 +545,13 @@ struct spl_in {
enum linear_light_scaling lls_pref; // Linear Light Scaling
bool prefer_easf;
bool disable_easf;
+ bool override_easf; /* If true, keep EASF enabled but use provided in_taps */
struct spl_debug debug;
bool is_fullscreen;
bool is_hdr_on;
int h_active;
int v_active;
+ int min_viewport_size;
int sdr_white_level_nits;
enum sharpen_policy sharpen_policy;
};
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c
index be2f34d034c5..be2f34d034c5 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h
index cdc4e107b9de..cdc4e107b9de 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h
index a6f6132df241..a6f6132df241 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 131f1e3949d3..ebf0287417e0 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64(
{
unsigned long long result;
- SPL_ASSERT(divisor);
-
result = spl_div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- SPL_ASSERT(arg.value);
-
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
arg.value);
@@ -346,7 +342,7 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
if (m > 0)
return spl_fixpt_shl(
spl_fixed31_32_exp_from_taylor_series(r),
- (unsigned char)m);
+ (unsigned int)m);
else
return spl_fixpt_div_int(
spl_fixed31_32_exp_from_taylor_series(r),
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
index ed2647f9a099..9f349ffe9148 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
@@ -189,7 +189,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
* @brief
* result = arg << shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned int shift)
{
SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
@@ -203,7 +203,7 @@ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, uns
* @brief
* result = arg >> shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned int shift)
{
bool negative = arg.value < 0;
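Widening the shift parameter of spl_fixpt_shl()/spl_fixpt_shr() from unsigned char to unsigned int matches the spl_fixpt_exp() hunk above, which now casts the exponent m to unsigned int, so the shift count is no longer implicitly narrowed on the way in. A hedged sketch of the effect, illustrative only (callers are still expected to keep the count small, as the SPL_ASSERT in spl_fixpt_shl() enforces):

	/* illustrative only - not part of this patch */
	struct spl_fixed31_32 x = spl_fixpt_from_fraction(3, 2);
	struct spl_fixed31_32 y = spl_fixpt_shl(x, 4u);	/* shift count is now unsigned int */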
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h
index 2e6ba71960ac..2e6ba71960ac 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4b3ccbca0da2..9d0168986fe7 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
@@ -114,6 +114,7 @@ enum dmub_asic {
DMUB_ASIC_DCN321,
DMUB_ASIC_DCN35,
DMUB_ASIC_DCN351,
+ DMUB_ASIC_DCN36,
DMUB_ASIC_DCN401,
DMUB_ASIC_MAX,
};
@@ -128,7 +129,10 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
+ DMUB_WINDOW_LSDMA_BUFFER,
+ DMUB_WINDOW_CURSOR_OFFLOAD,
DMUB_WINDOW_TOTAL,
};
@@ -141,6 +145,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ DMUB_NOTIFICATION_FUSED_IO,
DMUB_NOTIFICATION_MAX
};
@@ -169,6 +174,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines default interface for host->dmub commands */

+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -305,6 +317,8 @@ struct dmub_srv_hw_params {
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
+ bool override_hbr3_pll_vco;
+ bool disable_dpia_bw_allocation;
};
/**
@@ -312,7 +326,7 @@ struct dmub_srv_hw_params {
* @timeout_occured: Indicates a timeout occured on any message from driver to dmub
* @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
*/
-struct dmub_srv_debug {
+struct dmub_timeout_info {
bool timeout_occured;
union dmub_rb_cmd timeout_cmd;
unsigned long long timestamp;
@@ -339,13 +353,42 @@ struct dmub_diagnostic_data {
uint32_t outbox1_wptr;
uint32_t outbox1_size;
uint32_t gpint_datain0;
- struct dmub_srv_debug timeout_info;
+ struct dmub_timeout_info timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
uint8_t is_traceport_en : 1;
uint8_t is_cw0_enabled : 1;
uint8_t is_cw6_enabled : 1;
+ uint8_t is_pwait : 1;
+};
+
+/**
+ * struct dmub_preos_info - preos fw info before loading post os fw.
+ */
+struct dmub_preos_info {
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint64_t trace_buffer_phy_addr;
+ uint32_t trace_buffer_size;
+ uint32_t fw_version;
+ uint32_t boot_status;
+ uint32_t boot_options;
+};
+
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
};
/**
@@ -421,6 +464,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+ uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
+
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -455,24 +500,28 @@ struct dmub_srv_hw_funcs {
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
- void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+ void (*get_diagnostic_data)(struct dmub_srv *dmub);
+ bool (*get_preos_fw_info)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -492,6 +541,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
/**
@@ -501,7 +551,8 @@ struct dmub_srv_create_params {
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
- * @fw_state: dmub firmware state pointer
+ * @cursor_offload_v1: Cursor offload state
+ * @fw_state: dmub firmware state pointer (debug purpose only)
*/
struct dmub_srv {
enum dmub_asic asic;
@@ -509,7 +560,10 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ struct dmub_fb ib_mem_gart;
+ struct dmub_fb cursor_offload_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
+ volatile struct dmub_cursor_offload_v1 *cursor_offload_v1;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -518,11 +572,11 @@ struct dmub_srv {
struct dmub_srv_dcn32_regs *regs_dcn32;
struct dmub_srv_dcn35_regs *regs_dcn35;
const struct dmub_srv_dcn401_regs *regs_dcn401;
-
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -533,6 +587,7 @@ struct dmub_srv {
bool sw_init;
bool hw_init;
+ bool dpia_supported;
uint64_t fb_base;
uint64_t fb_offset;
@@ -542,9 +597,12 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
- struct dmub_srv_debug debug;
+ struct dmub_diagnostic_data debug;
+ struct dmub_fb lsdma_rb_fb;
+ struct dmub_preos_info preos_info;
};
/**
@@ -561,27 +619,18 @@ struct dmub_notification {
enum dmub_notification_type type;
uint8_t link_index;
uint8_t result;
+ /* notify instance from DMUB */
+ uint8_t instance;
bool pending_notification;
union {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
- /**
- * DPIA notification command.
- */
- struct dmub_rb_cmd_dpia_notification dpia_notification;
struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
+ struct dmub_cmd_fused_request fused_request;
};
};
-/* enum dmub_ips_mode - IPS mode identifier */
-enum dmub_ips_mode {
- DMUB_IPS_MODE_IPS1_MAX = 0,
- DMUB_IPS_MODE_IPS2,
- DMUB_IPS_MODE_IPS1_RCG,
- DMUB_IPS_MODE_IPS1_ONO2_ON
-};
-
/**
* DMUB firmware version helper macro - useful for checking if the version
* of a firmware to know if feature or functionality is supported or present.
@@ -699,19 +748,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -723,11 +760,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -736,7 +773,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed
@@ -795,6 +832,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -892,15 +946,12 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_srv_should_detect(struct dmub_srv *dmub);
@@ -959,26 +1010,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -990,4 +1021,81 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies return data for command into buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until the DMUB inbox has at least the requested number of free entries.
+ * The maximum wait time is given in microseconds to prevent spinning
+ * forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required);
+
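A hedged sketch of driving the split submit/response interface synchronously; the helper name and 30 ms timeouts are assumptions, and dmub_srv_wait_for_idle is the pre-existing wait documented above.

static enum dmub_status example_send_cmd_sync(struct dmub_srv *dmub,
					      union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	/* Ensure at least one inbox entry is free before queuing. */
	status = dmub_srv_wait_for_inbox_free(dmub, 30 * 1000, 1);
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_reg_cmd_execute(dmub, cmd);
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_wait_for_idle(dmub, 30 * 1000);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Copy any reply payload back into the caller's command buffer. */
	dmub_srv_cmd_get_response(dmub, cmd);
	return DMUB_STATUS_OK;
}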
+/**
+ * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
+ * @dmub: the dmub service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_HW_FAILURE - issue with HW programming
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_get_preos_info() - retrieves preos fw info
+ * @dmub: the dmub service
+ *
+ * Return:
+ * true - preos fw info retrieved successfully
+ * false - preos fw info not retrieved successfully
+ */
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 59990929e44e..3f2a0ed02c59 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,9 @@
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
+#ifdef __forceinline
+#undef __forceinline
+#endif
#define __forceinline inline
/**
@@ -101,6 +104,14 @@
*/
#define DMUB_MAX_FPO_STREAMS 4
+/* Define to ensure that the "common" members always appear in the same
+ * order in different structs for back compat purposes
+ */
+#define COMMON_STREAM_STATIC_SUB_STATE \
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy; \
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp; \
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+
/* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6
@@ -161,6 +172,13 @@
#endif
/**
+ * OS/FW agnostic memcmp
+ */
+#ifndef dmub_memcmp
+#define dmub_memcmp(lhs, rhs, bytes) memcmp((lhs), (rhs), (bytes))
+#endif
+
+/**
* OS/FW agnostic udelay
*/
#ifndef dmub_udelay
@@ -281,6 +299,31 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+
+/* Flattened structure containing SOC BB parameters stored in the VBIOS
+ * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters.
+ * This also prevents alignment issues when new parameters are added to the SoC BB.
+ * The following parameters should be added since these values can't be obtained elsewhere:
+ * -dml2_soc_power_management_parameters
+ * -dml2_soc_vmin_clock_limits
+ */
+struct dmub_soc_bb_params {
+ uint32_t dram_clk_change_blackout_ns;
+ uint32_t dram_clk_change_read_only_ns;
+ uint32_t dram_clk_change_write_only_ns;
+ uint32_t fclk_change_blackout_ns;
+ uint32_t g7_ppt_blackout_ns;
+ uint32_t stutter_enter_plus_exit_latency_ns;
+ uint32_t stutter_exit_latency_ns;
+ uint32_t z8_stutter_enter_plus_exit_latency_ns;
+ uint32_t z8_stutter_exit_latency_ns;
+ uint32_t z8_min_idle_time_ns;
+ uint32_t type_b_dram_clk_change_blackout_ns;
+ uint32_t type_b_ppt_blackout_ns;
+ uint32_t vmin_limit_dispclk_khz;
+ uint32_t vmin_limit_dcfclk_khz;
+ uint32_t g7_temperature_read_blackout_ns;
+};
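A hedged consumer sketch: the flattened nanosecond/kHz values could be converted back into the microsecond latencies a bounding-box consumer expects. The destination struct, its field names, and the helper name below are invented for illustration.

struct example_soc_bb_us {
	double dram_clk_change_blackout_us;
	double fclk_change_blackout_us;
	double stutter_exit_latency_us;
	uint32_t vmin_limit_dispclk_khz;
};

static void example_apply_soc_bb(const struct dmub_soc_bb_params *bb,
				 struct example_soc_bb_us *out)
{
	/* ns -> us conversions; clock limits are passed through unchanged. */
	out->dram_clk_change_blackout_us = bb->dram_clk_change_blackout_ns / 1000.0;
	out->fclk_change_blackout_us = bb->fclk_change_blackout_ns / 1000.0;
	out->stutter_exit_latency_us = bb->stutter_exit_latency_ns / 1000.0;
	out->vmin_limit_dispclk_khz = bb->vmin_limit_dispclk_khz;
}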
#pragma pack(pop)
/**
@@ -431,7 +474,80 @@ union replay_debug_flags {
*/
uint32_t enable_ips_residency_profiling : 1;
- uint32_t reserved : 20;
+ /**
+ * 0x1000 (bit 12)
+ * @enable_coasting_vtotal_check: Enable Coasting_vtotal_check
+ */
+ uint32_t enable_coasting_vtotal_check : 1;
+ /**
+ * 0x2000 (bit 13)
+ * @enable_visual_confirm_debug: Enable Visual Confirm Debug
+ */
+ uint32_t enable_visual_confirm_debug : 1;
+
+ /**
+ * 0x4000 (bit 14)
+ * @debug_log_enabled: Debug Log Enabled
+ */
+ uint32_t debug_log_enabled : 1;
+
+ /**
+ * 0x8000 (bit 15)
+ * @enable_sub_feature_visual_confirm: Enable Sub Feature Visual Confirm
+ */
+ uint32_t enable_sub_feature_visual_confirm : 1;
+
+ uint32_t reserved : 16;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
+ * Flags record error state.
+ */
+union replay_visual_confirm_error_state_flags {
+ struct {
+ /**
+ * 0x1 (bit 0) - Desync Error flag.
+ */
+ uint32_t desync_error : 1;
+
+ /**
+ * 0x2 (bit 1) - State Transition Error flag.
+ */
+ uint32_t state_transition_error : 1;
+
+ /**
+ * 0x4 (bit 2) - Crc Error flag
+ */
+ uint32_t crc_error : 1;
+
+ /**
+ * 0x8 (bit 3) - Reserved
+ */
+ uint32_t reserved_3 : 1;
+
+ /**
+ * 0x10 (bit 4) - Incorrect Coasting vtotal checking --> use debug flag to control DPCD write.
+ * Added new debug flag to control DPCD.
+ */
+ uint32_t incorrect_vtotal_in_static_screen : 1;
+
+ /**
+ * 0x20 (bit 5) - No doubled Refresh Rate.
+ */
+ uint32_t no_double_rr : 1;
+
+ /**
+ * Reserved bit 6-7
+ */
+ uint32_t reserved_6_7 : 2;
+
+ /**
+ * Reserved bit 9-31
+ */
+ uint32_t reserved_9_31 : 24;
} bitfields;
uint32_t u32All;
@@ -479,6 +595,109 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
+
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
+ * Flags that can be set by driver to change some Panel Replay behaviour.
+ */
+union pr_debug_flags {
+ struct {
+ /**
+ * 0x1 (bit 0)
+ * Enable visual confirm in FW.
+ */
+ uint32_t visual_confirm : 1;
+
+ /**
+ * 0x2 (bit 1)
+ * @skip_crc: Set if need to skip CRC.
+ */
+ uint32_t skip_crc : 1;
+
+ /**
+ * 0x4 (bit 2)
+ * @force_link_power_on: Force disable ALPM control
+ */
+ uint32_t force_link_power_on : 1;
+
+ /**
+ * 0x8 (bit 3)
+ * @force_phy_power_on: Force phy power on
+ */
+ uint32_t force_phy_power_on : 1;
+
+ /**
+ * 0x10 (bit 4)
+ * @skip_crtc_disabled: CRTC disable skipped
+ */
+ uint32_t skip_crtc_disabled : 1;
+
+ /*
+ * 0x20 (bit 5)
+ * @visual_confirm_rate_control: Enable Visual Confirm rate control detection
+ */
+ uint32_t visual_confirm_rate_control : 1;
+
+ uint32_t reserved : 26;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+union pr_hw_flags {
+ struct {
+ /**
+ * @allow_alpm_fw_standby_mode: To indicate whether the
+ * ALPM FW standby mode is allowed
+ */
+ uint32_t allow_alpm_fw_standby_mode : 1;
+
+ /*
+ * @dsc_enable_status: DSC enable status in driver
+ */
+ uint32_t dsc_enable_status : 1;
+
+ /**
+ * @fec_enable_status: receive fec enable/disable status from driver
+ */
+ uint32_t fec_enable_status : 1;
+
+ /*
+ * @smu_optimizations_en: SMU power optimization.
+ * Only when active display is Replay capable and display enters Replay.
+ * Trigger interrupt to SMU to powerup/down.
+ */
+ uint32_t smu_optimizations_en : 1;
+
+ /**
+ * @phy_power_state: Indicates current phy power state
+ */
+ uint32_t phy_power_state : 1;
+
+ /**
+ * @link_power_state: Indicates current link power state
+ */
+ uint32_t link_power_state : 1;
+ /**
+ * Use TPS3 signal when restoring the main link.
+ */
+ uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -508,6 +727,7 @@ struct dmub_feature_caps {
uint8_t replay_supported;
uint8_t replay_reserved[3];
uint8_t abm_aux_backlight_support;
+ uint8_t lsdma_support_in_dmu;
};
struct dmub_visual_confirm_color {
@@ -520,6 +740,112 @@ struct dmub_visual_confirm_color {
uint16_t panel_inst;
};
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn30_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16;
+ uint32_t reserved1[5];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn401_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16;
+ uint32_t reserved1[4];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload.
+ */
+struct dmub_cursor_offload_pipe_data_v1 {
+ union {
+ struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */
+ struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */
+ uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */
+ };
+};
+
+/**
+ * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data.
+ */
+struct dmub_cursor_offload_payload_data_v1 {
+ uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */
+ uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */
+ uint32_t pipe_mask; /**< Mask of pipes to update. */
+ uint32_t reserved; /**< Reserved for future use. */
+ struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */
+};
+
+/**
+ * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload.
+ */
+struct dmub_cursor_offload_stream_v1 {
+ struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */
+ uint32_t write_idx; /**< The index of the last written payload. */
+};
+
+/**
+ * struct dmub_cursor_offload_v1 - Cursor offload feature state.
+ */
+struct dmub_cursor_offload_v1 {
+ struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */
+};
+
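The start/finish write indices in dmub_cursor_offload_payload_data_v1 form a seqlock-style handshake: the producer bumps write_idx_start, fills pipe_data, then bumps write_idx_finish. A hedged consumer-side check (the helper name is illustrative):

static bool example_cursor_payload_is_consistent(
	const struct dmub_cursor_offload_payload_data_v1 *p)
{
	/* A mismatch means the producer's write is still in flight. */
	return p->write_idx_start == p->write_idx_finish;
}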
//==============================================================================
//</DMUB_TYPES>=================================================================
//==============================================================================
@@ -539,7 +865,8 @@ struct dmub_visual_confirm_color {
union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
- uint32_t reserved : 31;
+ uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */
+ uint32_t reserved : 30;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -671,6 +998,21 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
+ DMUB_IPS_DISABLE_Z8_RETENTION = 7,
+};
+
+enum dmub_ips_rcg_disable_type {
+ DMUB_IPS_RCG_ENABLE = 0,
+ DMUB_IPS0_RCG_DISABLE = 1,
+ DMUB_IPS1_RCG_DISABLE = 2,
+ DMUB_IPS_RCG_DISABLE = 3
+};
+
+enum dmub_ips_in_vpb_disable_type {
+ DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour
+ DMUB_IPS_VPB_DISABLE_ALL = 1,
+ DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2,
+ DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -678,6 +1020,39 @@ enum dmub_ips_disable_type {
#define DMUB_IPS1_COMMIT_MASK 0x00000004
#define DMUB_IPS2_COMMIT_MASK 0x00000008
+enum dmub_ips_comand_type {
+ /**
+ * Start/stop IPS residency measurements for a given IPS mode
+ */
+ DMUB_CMD__IPS_RESIDENCY_CNTL = 0,
+ /**
+ * Query IPS residency information for a given IPS mode
+ */
+ DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1,
+};
+
+/**
+ * enum dmub_cursor_offload_comand_type - Cursor offload subcommands.
+ */
+enum dmub_cursor_offload_comand_type {
+ /**
+ * Initializes the cursor offload feature.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_INIT = 0,
+ /**
+ * Enables cursor offloading for a stream and updates the timing parameters.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1,
+ /**
+ * Disables cursor offloading for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2,
+ /**
+ * Programs the latest data for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -708,7 +1083,9 @@ union dmub_fw_boot_options {
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
- uint32_t reserved : 6; /**< reserved */
+ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
+ uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */
+ uint32_t reserved : 4; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -741,6 +1118,7 @@ enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3,
+ DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
@@ -749,11 +1127,12 @@ enum dmub_shared_state_feature_id {
*/
union dmub_shared_state_ips_fw_signals {
struct {
- uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
- uint32_t reserved_bits : 28; /**< Reversed */
+ uint32_t ips1z8_commit: 1; /**< 1 if in IPS1 Z8 Retention */
+ uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -768,7 +1147,12 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
- uint32_t reserved_bits : 27; /**< Reversed bits */
+ uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
+ uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
+ uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
+ uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */
+ uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */
+ uint32_t reserved_bits : 22; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -797,7 +1181,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
- uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
+ uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
+ uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -814,6 +1200,22 @@ struct dmub_shared_state_ips_driver {
}; /* 248-bytes, fixed */
/**
+ * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_stream_v1 {
+ uint32_t last_write_idx; /**< Last write index */
+ uint8_t reserved[28]; /**< Reserved bytes. */
+}; /* 32-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_v1 {
+ struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */
+ uint8_t reserved[56]; /**< reserved for future use */
+}; /* 248-bytes, fixed */
+
+/**
* enum dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
@@ -839,6 +1241,7 @@ struct dmub_shared_state_feature_block {
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */
+ struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
@@ -1185,6 +1588,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
+ /**
+ * DESC: Initiates IPS wake sequence.
+ */
+ DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -1264,6 +1671,16 @@ enum dmub_inbox0_command {
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
/**
+ * Maximum number of items in the DMUB REG INBOX0 internal ringbuffer.
+ */
+#define DMUB_REG_INBOX0_RB_MAX_ENTRY 16
+
+/**
+ * Ringbuffer size in bytes.
+ */
+#define DMUB_REG_INBOX0_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_REG_INBOX0_RB_MAX_ENTRY)
+
+/**
* REG_SET mask for reg offload.
*/
#define REG_SET_MASK 0xFFFF
@@ -1399,6 +1816,40 @@ enum dmub_cmd_type {
*/
DMUB_CMD__PSP = 88,
+ /**
+ * Command type used for all Fused IO commands.
+ */
+ DMUB_CMD__FUSED_IO = 89,
+
+ /**
+ * Command type used for all LSDMA commands.
+ */
+ DMUB_CMD__LSDMA = 90,
+
+ /**
+ * Command type used for all IPS commands.
+ */
+ DMUB_CMD__IPS = 91,
+
+ /**
+ * Command type used for Cursor offload.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD = 92,
+
+ /**
+ * Command type used for all SMART_POWER_OLED commands.
+ */
+ DMUB_CMD__SMART_POWER_OLED = 93,
+
+ /**
+ * Command type used for all Panel Replay commands.
+ */
+ DMUB_CMD__PR = 94,
+
+
+ /**
+ * Command type used for VBIOS shared commands.
+ */
DMUB_CMD__VBIOS = 128,
};
@@ -1430,6 +1881,10 @@ enum dmub_out_cmd_type {
* Command type used for HPD redetect notification
*/
DMUB_OUT_CMD__HPD_SENSE_NOTIFY = 6,
+ /**
+ * Command type used for Fused IO notification
+ */
+ DMUB_OUT_CMD__FUSED_IO = 7,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -1456,7 +1911,8 @@ struct dmub_cmd_header {
unsigned int sub_type : 8; /**< command sub type */
unsigned int ret_status : 1; /**< 1 if returned data, 0 otherwise */
unsigned int multi_cmd_pending : 1; /**< 1 if multiple commands chained together */
- unsigned int reserved0 : 6; /**< reserved bits */
+ unsigned int is_reg_based : 1; /**< 1 if register based mailbox cmd, 0 if FB based cmd */
+ unsigned int reserved0 : 5; /**< reserved bits */
unsigned int payload_bytes : 6; /* payload excluding header - up to 60 bytes */
unsigned int reserved1 : 2; /**< reserved bits */
};
@@ -1804,6 +2260,154 @@ struct dmub_rb_cmd_fams2_flip {
struct dmub_fams2_flip_info flip_info;
};
+struct dmub_cmd_lsdma_data {
+ union {
+ struct lsdma_init_data {
+ union dmub_addr gpu_addr_base;
+ uint32_t ring_size;
+ } init_data;
+ struct lsdma_tiled_copy_data {
+ uint32_t src_addr_lo;
+ uint32_t src_addr_hi;
+
+ uint32_t dst_addr_lo;
+ uint32_t dst_addr_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
+ uint32_t dst_width : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_swizzle_mode : 5;
+ uint32_t src_mip_max : 5;
+ uint32_t src_mip_id : 5;
+ uint32_t dst_mip_max : 5;
+ uint32_t dst_swizzle_mode : 5;
+ uint32_t dst_mip_id : 5;
+ uint32_t tmz : 1;
+ uint32_t dcc : 1;
+
+ uint32_t data_format : 6;
+ uint32_t padding1 : 4;
+ uint32_t dst_element_size : 3;
+ uint32_t num_type : 3;
+ uint32_t src_element_size : 3;
+ uint32_t write_compress : 2;
+ uint32_t cache_policy_dst : 2;
+ uint32_t cache_policy_src : 2;
+ uint32_t read_compress : 2;
+ uint32_t src_dim : 2;
+ uint32_t dst_dim : 2;
+ uint32_t max_uncom : 1;
+
+ uint32_t max_com : 2;
+ uint32_t padding : 30;
+ } tiled_copy_data;
+ struct lsdma_linear_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t count : 30;
+ uint32_t cache_policy_dst : 2;
+
+ uint32_t tmz : 1;
+ uint32_t cache_policy_src : 2;
+ uint32_t padding : 29;
+ } linear_copy_data;
+ struct lsdma_linear_sub_window_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t reserved0 : 22;
+ } linear_sub_window_copy_data;
+ struct lsdma_reg_write_data {
+ uint32_t reg_addr;
+ uint32_t reg_data;
+ } reg_write_data;
+ struct lsdma_pio_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ } pio_copy_data;
+ struct lsdma_pio_constfill_data {
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+
+ uint32_t data;
+ } pio_constfill_data;
+
+ uint32_t all[14];
+ } u;
+};
+
+struct dmub_rb_cmd_lsdma {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_lsdma_data lsdma_data;
+};
+
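A hedged sketch of packing a linear copy request into the new command; DMUB_CMD__LSDMA and DMUB_CMD__LSDMA_LINEAR_COPY are defined later in this header, while the helper name, count units, and address handling are assumptions.

static void example_fill_lsdma_linear_copy(struct dmub_rb_cmd_lsdma *cmd,
					   uint64_t src, uint64_t dst,
					   uint32_t count)
{
	*cmd = (struct dmub_rb_cmd_lsdma){0};
	cmd->header.type = DMUB_CMD__LSDMA;
	cmd->header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;

	cmd->lsdma_data.u.linear_copy_data.src_lo = (uint32_t)src;
	cmd->lsdma_data.u.linear_copy_data.src_hi = (uint32_t)(src >> 32);
	cmd->lsdma_data.u.linear_copy_data.dst_lo = (uint32_t)dst;
	cmd->lsdma_data.u.linear_copy_data.dst_hi = (uint32_t)(dst >> 32);
	cmd->lsdma_data.u.linear_copy_data.count = count; /* 30-bit field; units per FW contract */
}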
struct dmub_optc_state_v2 {
uint32_t v_total_min;
uint32_t v_total_max;
@@ -1835,6 +2439,28 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
+struct dmub_rect16 {
+ /**
+ * Dirty rect x offset.
+ */
+ uint16_t x;
+
+ /**
+ * Dirty rect y offset.
+ */
+ uint16_t y;
+
+ /**
+ * Dirty rect width.
+ */
+ uint16_t width;
+
+ /**
+ * Dirty rect height.
+ */
+ uint16_t height;
+};
+
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
@@ -1907,11 +2533,13 @@ union dmub_fams2_stream_static_sub_state {
}; //v0
union dmub_fams2_cmd_stream_static_sub_state {
- struct dmub_fams2_cmd_legacy_stream_static_state legacy;
- struct dmub_fams2_cmd_subvp_stream_static_state subvp;
- struct dmub_fams2_cmd_drr_stream_static_state drr;
+ COMMON_STREAM_STATIC_SUB_STATE
}; //v1
+union dmub_fams2_stream_static_sub_state_v2 {
+ COMMON_STREAM_STATIC_SUB_STATE
+}; //v2
+
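A hedged compile-time check of the intent behind COMMON_STREAM_STATIC_SUB_STATE: since both unions expand to the same member list, their layouts should never diverge.

_Static_assert(sizeof(union dmub_fams2_cmd_stream_static_sub_state) ==
	       sizeof(union dmub_fams2_stream_static_sub_state_v2),
	       "v1/v2 stream static sub-state layouts diverged");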
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
uint32_t otg_vline_time_ns;
@@ -1977,7 +2605,7 @@ struct dmub_fams2_cmd_stream_static_base_state {
struct dmub_fams2_stream_static_state_v1 {
struct dmub_fams2_cmd_stream_static_base_state base;
- union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ union dmub_fams2_stream_static_sub_state_v2 sub_state;
}; //v1
/**
@@ -2014,10 +2642,12 @@ struct dmub_cmd_fams2_global_config {
union dmub_fams2_global_feature_config features;
uint32_t recovery_timeout_us;
uint32_t hwfq_flip_programming_delay_us;
+ uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target
};
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
+// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h
struct dmub_fams2_stream_static_state stream; //v0
union {
struct dmub_fams2_cmd_stream_static_base_state base;
@@ -2025,6 +2655,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2034,6 +2669,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
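A hedged sketch of passing a payload too large for one 64-byte ring entry through the indirect buffer descriptor; the helper name is illustrative, the top-level header.type assignment is omitted, and config_gpu_addr is assumed to already point at a firmware-visible copy of the config.

static void example_fill_fams2_ib_cmd(struct dmub_rb_cmd_ib *cmd,
				      union dmub_addr config_gpu_addr)
{
	*cmd = (struct dmub_rb_cmd_ib){0};
	/* DMUB_CMD__FAMS2_IB_CONFIG is the sub-type added by this patch. */
	cmd->header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
	cmd->ib_data.src = config_gpu_addr;
	cmd->ib_data.size = (uint16_t)sizeof(struct dmub_fams2_config_v2);
}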
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2056,6 +2707,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2201,7 +2857,8 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
- uint8_t reserved2[3]; /**< For future use */
+ uint8_t skip_phy_ssc_reduction;
+ uint8_t reserved2[2]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
};
@@ -2548,7 +3205,11 @@ enum dp_hpd_type {
/**
* DP HPD short pulse
*/
- DP_IRQ
+ DP_IRQ = 1,
+ /**
+ * Failure to acquire DP HPD state
+ */
+ DP_NONE_HPD = 2
};
/**
@@ -2815,6 +3476,7 @@ enum dmub_cmd_fams_type {
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 7,
};
/**
@@ -3057,6 +3719,12 @@ struct dmub_cmd_psr_copy_settings_data {
* Some panels request main link off before xth vertical line
*/
uint16_t poweroff_before_vertical_line;
+ /**
+ * Some panels cannot handle the idle pattern during PSR entry,
+ * so power down the PHY before disabling the stream to avoid
+ * sending it.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
/**
@@ -3515,6 +4183,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+enum dmub_alpm_mode {
+ ALPM_AUXWAKE = 0,
+ ALPM_AUXLESS = 1,
+ ALPM_UNSUPPORTED = 2,
+};
+
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3586,6 +4260,33 @@ enum replay_state {
};
/**
+ * Definition of a panel replay state
+ */
+enum pr_state {
+ PR_STATE_0 = 0x00, // State 0 steady state
+ // Pending SDP and Unlock before back to State 0
+ PR_STATE_0_PENDING_SDP_AND_UNLOCK = 0x01,
+ PR_STATE_1 = 0x10, // State 1
+ PR_STATE_2 = 0x20, // State 2 steady state
+ // Pending frame transmission before transition to State 2
+ PR_STATE_2_PENDING_FRAME_TRANSMISSION = 0x30,
+ // Active and Powered Up
+ PR_STATE_2_POWERED = 0x31,
+ // Active and Powered Down, but need to blank HUBP after DPG_EN latch
+ PR_STATE_2_PENDING_HUBP_BLANK = 0x32,
+ // Active and Pending Power Up
+ PR_STATE_2_PENDING_POWER_UP = 0x33,
+ // Active and Powered Up, Pending DPG latch
+ PR_STATE_2_PENDING_LOCK_FOR_DPG_POWER_ON = 0x34,
+ // Active and Powered Up, Pending SDP and Unlock
+ PR_STATE_2_PENDING_SDP_AND_UNLOCK = 0x35,
+ // Pending transmission of AS SDP for timing sync, but no rfb update
+ PR_STATE_2_PENDING_AS_SDP = 0x36,
+ // Invalid
+ PR_STATE_INVALID = 0xFF,
+};
+
+/**
* Replay command sub-types.
*/
enum dmub_cmd_replay_type {
@@ -3626,11 +4327,34 @@ enum dmub_cmd_replay_type {
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
/**
+ * Set version
+ */
+ DMUB_CMD__REPLAY_SET_VERSION = 9,
+ /**
* Set Replay General command.
*/
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
};
+/*
+ * Panel Replay sub-types
+ */
+enum dmub_cmd_panel_replay_type {
+ DMUB_CMD__PR_ENABLE = 0,
+ DMUB_CMD__PR_COPY_SETTINGS = 1,
+ DMUB_CMD__PR_UPDATE_STATE = 2,
+ DMUB_CMD__PR_GENERAL_CMD = 3,
+};
+
+enum dmub_cmd_panel_replay_state_update_subtype {
+ PR_STATE_UPDATE_COASTING_VTOTAL = 0x1,
+ PR_STATE_UPDATE_SYNC_MODE = 0x2,
+};
+
+enum dmub_cmd_panel_replay_general_subtype {
+ PR_GENERAL_CMD_DEBUG_OPTION = 0x1,
+};
+
/**
* Replay general command sub-types.
*/
@@ -3644,6 +4368,22 @@ enum dmub_cmd_replay_general_subtype {
*/
REPLAY_GENERAL_CMD_DISABLED_ADAPTIVE_SYNC_SDP,
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
+ REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
+ REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
+ REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
+};
+
+struct dmub_alpm_auxless_data {
+ uint16_t lfps_setup_ns;
+ uint16_t lfps_period_ns;
+ uint16_t lfps_silence_ns;
+ uint16_t lfps_t1_t2_override_us;
+ short lfps_t1_t2_offset_us;
+ uint8_t lttpr_count;
+ /*
+ * Padding to align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
};
/**
@@ -3716,6 +4456,79 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * Determines if fast resync in ultra sleep mode is enabled/disabled.
+ */
+ uint8_t replay_support_fast_resync_in_ultra_sleep_mode;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
+};
+
+
+/**
+ * Replay versions.
+ */
+enum replay_version {
+ /**
+ * FreeSync Replay
+ */
+ REPLAY_VERSION_FREESYNC_REPLAY = 0,
+ /**
+ * Panel Replay
+ */
+ REPLAY_VERSION_PANEL_REPLAY = 1,
+ /**
+ * Replay not supported.
+ */
+ REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_cmd_replay_set_version_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay version that FW should implement.
+ */
+ enum replay_version version;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[3];
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_rb_cmd_replay_set_version {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_cmd_replay_set_version_data replay_set_version_data;
};
/**
@@ -3747,6 +4560,45 @@ enum replay_enable {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable_data {
+ /**
+ * SMART_POWER_OLED enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+
+ uint16_t peak_nits;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ uint8_t debugcontrol;
+ /*
+ * vertical interrupt trigger line
+ */
+ uint32_t triggerline;
+
+ uint16_t fixed_max_cll;
+
+ uint8_t pad[2];
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command.
*/
struct dmub_rb_cmd_replay_enable_data {
@@ -3917,9 +4769,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
*/
uint16_t coasting_vtotal_high;
/**
- * Explicit padding to 4 byte boundary.
+ * frame skip number.
*/
- uint8_t pad[2];
+ uint16_t frame_skip_number;
};
/**
@@ -4070,12 +4922,68 @@ union dmub_replay_cmd_set {
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
/**
+ * Definition of DMUB_CMD__REPLAY_SET_VERSION command data.
+ */
+ struct dmub_cmd_replay_set_version_data version_data;
+ /**
* Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
*/
struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
};
/**
+ * SMART POWER OLED command sub-types.
+ */
+enum dmub_cmd_smart_power_oled_type {
+
+ /**
+ * Enable/Disable SMART_POWER_OLED.
+ */
+ DMUB_CMD__SMART_POWER_OLED_ENABLE = 1,
+ /**
+ * Get current MaxCLL value if SMART POWER OLED is enabled.
+ */
+ DMUB_CMD__SMART_POWER_OLED_GETMAXCLL = 2,
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_rb_cmd_smart_power_oled_enable_data data;
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_input {
+ uint8_t panel_inst;
+ uint8_t pad[3];
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_output {
+ uint16_t current_max_cll;
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_getmaxcll {
+ struct dmub_cmd_header header; /**< Command header */
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ union dmub_cmd_smart_power_oled_getmaxcll_data {
+ struct dmub_cmd_smart_power_oled_getmaxcll_input input; /**< Input */
+ struct dmub_cmd_smart_power_oled_getmaxcll_output output; /**< Output */
+ uint32_t output_raw; /**< Raw data output */
+ } data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -4157,6 +5065,7 @@ enum hw_lock_client {
*/
HW_LOCK_CLIENT_REPLAY = 4,
HW_LOCK_CLIENT_FAMS2 = 5,
+ HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6,
/**
* Invalid client.
*/
@@ -4264,6 +5173,46 @@ enum dmub_cmd_abm_type {
* Get the current ACE curve.
*/
DMUB_CMD__ABM_GET_ACE_CURVE = 10,
+
+ /**
+ * Get current histogram data
+ */
+ DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
+};
+
+/**
+ * LSDMA command sub-types.
+ */
+enum dmub_cmd_lsdma_type {
+ /**
+ * Initialize parameters for LSDMA.
+ * Ring buffer is mapped to the ring buffer
+ */
+ DMUB_CMD__LSDMA_INIT_CONFIG = 0,
+ /**
+ * LSDMA copies data from source to destination linearly
+ */
+ DMUB_CMD__LSDMA_LINEAR_COPY = 1,
+ /**
+ * LSDMA copies data from source to destination linearly in sub window
+ */
+ DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
+ /**
+ * Send the tiled-to-tiled copy command
+ */
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3,
+ /**
+ * Send the poll reg write command
+ */
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 4,
+ /**
+ * Send the pio copy command
+ */
+ DMUB_CMD__LSDMA_PIO_COPY = 5,
+ /**
+ * Send the pio constfill command
+ */
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 6,
};
struct abm_ace_curve {
@@ -4858,6 +5807,20 @@ enum dmub_abm_ace_curve_type {
};
/**
+ * enum dmub_abm_histogram_type - Histogram type.
+ */
+enum dmub_abm_histogram_type {
+ /**
+ * ACE curve as defined by the SW layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW = 0,
+ /**
+ * ACE curve as defined by the SW to HW translation interface layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW_IF = 1,
+};
+
+/**
* Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
*/
struct dmub_rb_cmd_abm_get_ace_curve {
@@ -4893,6 +5856,41 @@ struct dmub_rb_cmd_abm_get_ace_curve {
};
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+struct dmub_rb_cmd_abm_get_histogram {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where Histogram should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of Histogram being queried.
+ */
+ enum dmub_abm_histogram_type histogram_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -5256,6 +6254,64 @@ struct dmub_rb_cmd_get_usbc_cable_id {
} data;
};
+enum dmub_cmd_fused_io_sub_type {
+ DMUB_CMD__FUSED_IO_EXECUTE = 0,
+ DMUB_CMD__FUSED_IO_ABORT = 1,
+};
+
+enum dmub_cmd_fused_request_type {
+ FUSED_REQUEST_READ,
+ FUSED_REQUEST_WRITE,
+ FUSED_REQUEST_POLL,
+};
+
+enum dmub_cmd_fused_request_status {
+ FUSED_REQUEST_STATUS_SUCCESS,
+ FUSED_REQUEST_STATUS_BEGIN,
+ FUSED_REQUEST_STATUS_SUBMIT,
+ FUSED_REQUEST_STATUS_REPLY,
+ FUSED_REQUEST_STATUS_POLL,
+ FUSED_REQUEST_STATUS_ABORTED,
+ FUSED_REQUEST_STATUS_FAILED = 0x80,
+ FUSED_REQUEST_STATUS_INVALID,
+ FUSED_REQUEST_STATUS_BUSY,
+ FUSED_REQUEST_STATUS_TIMEOUT,
+ FUSED_REQUEST_STATUS_POLL_TIMEOUT,
+};
+
+struct dmub_cmd_fused_request {
+ uint8_t status;
+ uint8_t type : 2;
+ uint8_t _reserved0 : 3;
+ uint8_t poll_mask_msb : 3; // Number of MSB to zero out from last byte before comparing
+ uint8_t identifier;
+ uint8_t _reserved1;
+ uint32_t timeout_us;
+ union dmub_cmd_fused_request_location {
+ struct dmub_cmd_fused_request_location_i2c {
+ uint8_t is_aux : 1; // False
+ uint8_t ddc_line : 3;
+ uint8_t over_aux : 1;
+ uint8_t _reserved0 : 3;
+ uint8_t address;
+ uint8_t offset;
+ uint8_t length;
+ } i2c;
+ struct dmub_cmd_fused_request_location_aux {
+ uint32_t is_aux : 1; // True
+ uint32_t ddc_line : 3;
+ uint32_t address : 20;
+ uint32_t length : 8; // Automatically split into 16B transactions
+ } aux;
+ } u;
+ uint8_t buffer[0x30]; // Read: out, write: in, poll: expected
+};
+
+struct dmub_rb_cmd_fused_io {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_fused_request request;
+};
+
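A hedged sketch of building a fused AUX read; the field usage follows the comments above, while the helper name, timeout, and DPCD address handling are illustrative.

static void example_fill_fused_aux_read(struct dmub_cmd_fused_request *req,
					uint8_t ddc_line, uint32_t dpcd_addr,
					uint8_t length)
{
	*req = (struct dmub_cmd_fused_request){0};
	req->type = FUSED_REQUEST_READ;
	req->timeout_us = 10 * 1000;        /* illustrative */
	req->u.aux.is_aux = 1;              /* AUX rather than I2C-over-AUX */
	req->u.aux.ddc_line = ddc_line;
	req->u.aux.address = dpcd_addr;     /* 20-bit DPCD address */
	req->u.aux.length = length;         /* FW splits into 16B transactions */
	/* On completion the data read back is returned in req->buffer[]. */
}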
/**
* Command type of a DMUB_CMD__SECURE_DISPLAY command
*/
@@ -5364,6 +6420,315 @@ struct dmub_rb_cmd_assr_enable {
};
/**
+ * Current definition of "ips_mode" from driver
+ */
+enum ips_residency_mode {
+ IPS_RESIDENCY__IPS1_MAX,
+ IPS_RESIDENCY__IPS2,
+ IPS_RESIDENCY__IPS1_RCG,
+ IPS_RESIDENCY__IPS1_ONO2_ON,
+ IPS_RESIDENCY__IPS1_Z8_RETENTION,
+ IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS,
+ IPS_RESIDENCY__PG_ONO_CURRENT_STATE
+};
+
+#define NUM_IPS_HISTOGRAM_BUCKETS 16
+
+/**
+ * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats
+ */
+struct dmub_ips_residency_info {
+ uint32_t residency_millipercent;
+ uint32_t entry_counter;
+ uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
+ uint64_t total_time_us;
+ uint64_t total_inactive_time_us;
+ uint32_t ono_pg_state_at_collection;
+ uint32_t ono_pg_state_last_seen_in_ips;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command.
+ */
+struct dmub_cmd_ips_residency_cntl_data {
+ uint8_t panel_inst;
+ uint8_t start_measurement;
+ uint8_t padding[2]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_residency_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_residency_cntl_data cntl_data;
+};
+
+/**
+ * Data passed from FW to driver in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command.
+ */
+struct dmub_cmd_ips_query_residency_info_data {
+ union dmub_addr dest;
+ uint32_t size;
+ uint32_t ips_mode;
+ uint8_t panel_inst;
+ uint8_t padding[3]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_query_residency_info {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_query_residency_info_data info_data;
+};
+
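A hedged sketch of filling the residency query; dest_addr/size are assumed to describe firmware-writable memory sized for struct dmub_ips_residency_info, and the IPS mode chosen is illustrative.

static void example_fill_ips_residency_query(
	struct dmub_rb_cmd_ips_query_residency_info *cmd,
	union dmub_addr dest_addr, uint32_t dest_size, uint8_t panel_inst)
{
	*cmd = (struct dmub_rb_cmd_ips_query_residency_info){0};
	cmd->header.type = DMUB_CMD__IPS;
	cmd->header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
	cmd->info_data.dest = dest_addr;
	cmd->info_data.size = dest_size;
	cmd->info_data.ips_mode = IPS_RESIDENCY__IPS1_MAX; /* illustrative */
	cmd->info_data.panel_inst = panel_inst;
}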
+/**
+ * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command.
+ */
+struct dmub_cmd_cursor_offload_init_data {
+ union dmub_addr state_addr; /**< State address for dmub_cursor_offload */
+ uint32_t state_size; /**< State size for dmub_cursor_offload */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_init_data init_data;
+};
+
+/**
+ * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command.
+ */
+struct dmub_cmd_cursor_offload_stream_data {
+ uint32_t otg_inst: 4; /**< OTG instance to control */
+ uint32_t reserved: 28; /**< Reserved for future use */
+ uint32_t line_time_in_ns; /**< Line time in ns for the OTG */
+ uint32_t v_total_max; /**< OTG v_total_max */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_stream_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_stream_data data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_ENABLE command.
+ */
+struct dmub_cmd_pr_enable_data {
+ /**
+ * Panel Replay enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
+ */
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_ENABLE command.
+ * Panel Replay enable/disable is controlled using action in data.
+ */
+struct dmub_rb_cmd_pr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_cmd_pr_enable_data data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_cmd_pr_copy_settings_data {
+ /**
+ * Flags that can be set by driver to change some replay behaviour.
+ */
+ union pr_debug_flags debug;
+
+ /**
+ * @flags: Flags used to determine feature functionality.
+ */
+ union pr_hw_flags flags;
+
+ /**
+ * DPP HW instance.
+ */
+ uint8_t dpp_inst;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ /**
+ * AUX HW instance.
+ */
+ uint8_t aux_inst;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Length of each horizontal line in ns.
+ */
+ uint32_t line_time_in_ns;
+ /**
+ * PHY instance.
+ */
+ uint8_t dpphy_inst;
+ /**
+ * Determines if SMU optimizations are enabled/disabled.
+ */
+ uint8_t smu_optimizations_en;
+ /*
+ * Use FSM state for Replay power up/down
+ */
+ uint8_t use_phy_fsm;
+ /*
+ * Use FSFT affected pixel clk
+ */
+ uint32_t pix_clk_100hz;
+ /*
+ * Use Original pixel clock
+ */
+ uint32_t sink_pix_clk_100hz;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_rb_cmd_pr_copy_settings {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_cmd_pr_copy_settings_data data;
+};
+
+struct dmub_cmd_pr_update_state_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+
+ uint8_t pad[3]; // align to 4-byte boundary
+ /*
+ * Update flags to control the update behavior.
+ */
+ uint32_t update_flag;
+ /**
+ * state/data to set.
+ */
+ uint32_t coasting_vtotal;
+ uint32_t sync_mode;
+};
+
+struct dmub_cmd_pr_general_cmd_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * subtype: PR general cmd sub type
+ */
+ uint8_t subtype;
+
+ uint8_t pad[2];
+ /**
+ * config data by different subtypes
+ */
+ union {
+ uint32_t u32All;
+ } data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+struct dmub_rb_cmd_pr_update_state {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+ struct dmub_cmd_pr_update_state_data data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+struct dmub_rb_cmd_pr_general_cmd {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+ struct dmub_cmd_pr_general_cmd_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -5534,6 +6899,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+ struct dmub_rb_cmd_abm_get_histogram abm_get_histogram;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5622,6 +6992,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
*/
struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_rb_cmd_replay_set_version replay_set_version;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -5664,11 +7038,57 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
+
+ struct dmub_rb_cmd_fused_io fused_io;
+
+ /**
+ * Definition of a DMUB_CMD__LSDMA command.
+ */
+ struct dmub_rb_cmd_lsdma lsdma;
+
+ struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
+
+ struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command.
+ */
+ struct dmub_rb_cmd_cursor_offload_init cursor_offload_init;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD control commands.
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR
+ */
+ struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_enable smart_power_oled_enable;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_getmaxcll smart_power_oled_getmaxcll;
+ /*
+ * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_rb_cmd_pr_copy_settings pr_copy_settings;
+ /**
+ * Definition of a DMUB_CMD__PR_ENABLE command.
+ */
+ struct dmub_rb_cmd_pr_enable pr_enable;
+
+ struct dmub_rb_cmd_pr_update_state pr_update_state;
+
+ struct dmub_rb_cmd_pr_general_cmd pr_general_cmd;
};
/**
@@ -5699,6 +7119,7 @@ union dmub_rb_out_cmd {
* HPD sense notification command.
*/
struct dmub_rb_cmd_hpd_sense_notify hpd_sense_notify;
+ struct dmub_rb_cmd_fused_io fused_io;
};
#pragma pack(pop)
@@ -5746,6 +7167,45 @@ static inline bool dmub_rb_empty(struct dmub_rb *rb)
}
/**
+ * @brief gets number of outstanding requests in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return number of queued commands awaiting processing
+ */
+static inline uint32_t dmub_rb_num_outstanding(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return data_count / DMUB_RB_CMD_SIZE;
+}
+
+/**
+ * @brief gets number of free buffers in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return number of free command entries
+ */
+static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ /* +1 because 1 entry is always unusable */
+ data_count += DMUB_RB_CMD_SIZE;
+
+ return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
+}
+
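An invariant implied by the two helpers above, stated as a hedged sanity check (the helper name is illustrative): because one entry is kept unusable to distinguish full from empty, free plus outstanding always equals the capacity in entries minus one.

static inline bool example_rb_counts_consistent(struct dmub_rb *rb)
{
	return dmub_rb_num_free(rb) + dmub_rb_num_outstanding(rb) ==
	       rb->capacity / DMUB_RB_CMD_SIZE - 1;
}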
+/**
* @brief Checks if the ringbuffer is full
*
* @param rb DMUB Ringbuffer
@@ -5761,6 +7221,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* -1 because 1 entry is always unusable */
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
}
@@ -5775,15 +7236,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
- const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (const uint8_t *)cmd;
uint8_t i;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
// copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ for (i = 0; i < DMUB_RB_CMD_SIZE; i++)
*dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -5808,6 +7272,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
const uint8_t *src = (const uint8_t *)cmd;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
@@ -5853,6 +7320,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
uint32_t num_cmds,
uint32_t *next_rptr)
{
+ if (rb->capacity == 0)
+ return;
+
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
if (*next_rptr >= rb->capacity)
@@ -5916,6 +7386,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb,
*/
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_empty(rb))
return false;
@@ -5940,6 +7413,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
+ if (rb->capacity == 0)
+ return;
+
while (rptr != wptr) {
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index a00b9e992292..468b768c11ae 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -26,6 +26,7 @@ DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
DMUB += dmub_dcn35.o
DMUB += dmub_dcn351.o
+DMUB += dmub_dcn36.o
DMUB += dmub_dcn401.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index e500ca9ae09c..73888c1bea93 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -414,63 +414,66 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
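
The change above switches the DCN20 diagnostic collection from a caller-supplied dmub_diagnostic_data pointer to the dmub->debug member, preserving the externally filled timeout_info across the wipe. A minimal standalone sketch of that save/clear/restore pattern follows; the struct and field names are simplified stand-ins invented for the example, not the driver's real definitions.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver structures. */
struct timeout_info {
	uint32_t timeout_occurred;   /* filled by the caller before the snapshot */
	uint32_t timeout_cmd_id;
};

struct debug_snapshot {
	struct timeout_info timeout_info;
	uint32_t dmcub_version;
	uint32_t scratch[16];
};

static void take_snapshot(struct debug_snapshot *dbg, uint32_t fw_version)
{
	/* Cache the externally filled timeout data, wipe the rest, restore it. */
	struct timeout_info timeout = dbg->timeout_info;

	memset(dbg, 0, sizeof(*dbg));
	dbg->timeout_info = timeout;

	dbg->dmcub_version = fw_version;
	/* register reads would populate scratch[] and the mailbox fields here */
}

int main(void)
{
	struct debug_snapshot dbg = {0};

	dbg.timeout_info.timeout_occurred = 1;   /* set "externally" */
	take_snapshot(&dbg, 0x0500);
	printf("timeout preserved: %u, version: 0x%x\n",
	       dbg.timeout_info.timeout_occurred, dbg.dmcub_version);
	return 0;
}
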
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index de287b101848..42c1fb4bc73f 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -247,6 +247,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);
uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub);
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index d9f31b191c69..cd04d7c756c3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
- uint32_t in_reset, scratch, i, pwait_mode;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -371,8 +376,11 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
+ boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -408,69 +416,75 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
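
The DCN31 reset above now polls DMCUB_SCRATCH7 directly for the STOP_FW response with a much larger iteration budget. The sketch below shows the same bounded poll-with-delay idiom in isolation; read_scratch7(), delay_us() and the 0xDEADDEAD response value are stand-ins for REG_READ(DMCUB_SCRATCH7), udelay() and the real GPINT response code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "register" that flips to the expected value after a few polls. */
static uint32_t fake_scratch7;
static uint32_t fake_polls;

static uint32_t read_scratch7(void)
{
	if (++fake_polls >= 3)
		fake_scratch7 = 0xDEADDEAD;
	return fake_scratch7;
}

static void delay_us(uint32_t us) { (void)us; }

/* Poll until the status value appears or the iteration budget is exhausted. */
static bool poll_for_response(uint32_t expected, uint32_t budget, uint32_t step_us)
{
	uint32_t i;

	for (i = 0; i < budget; ++i) {
		if (read_scratch7() == expected)
			return true;
		delay_us(step_us);
	}
	return false; /* the reset path forces a soft reset in this case */
}

int main(void)
{
	printf("stopped: %d\n", poll_for_response(0xDEADDEAD, 100000, 1));
	return 0;
}
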
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index eccdab4986ce..1c43ef2bca66 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -251,7 +251,7 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_dcn31_should_detect(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 9600b7f858b0..7e9856289910 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,44 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ for (; i < timeout_us; i++) {
+ scratch = REG_READ(DMCUB_SCRATCH7);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
- if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
break;
+
+ udelay(poll_delay_us);
}
+ }
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
+ }
+
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -157,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -187,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -417,73 +435,72 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
- uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
+ uint32_t is_traceport_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
- REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
-
- REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
-
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
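
The reworked DCN32 reset only asserts soft reset and clears the enable bit when the controller was actually running, then drops the CW2-CW7 windows and zeroes the mailbox pointers. A toy model of that teardown ordering, with a fake register struct standing in for REG_UPDATE()/REG_WRITE():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register model, not the driver's register access macros. */
struct fake_regs {
	bool enable;
	bool soft_reset;
	bool cw_enable[8];
	uint32_t inbox1_rptr, inbox1_wptr;
};

static void halt_controller(struct fake_regs *r)
{
	int i;

	/* Only touch reset/enable if the controller was actually running. */
	if (r->enable) {
		r->soft_reset = true;
		/* the driver inserts a udelay(1) between these two writes */
		r->enable = false;
	}

	/* Drop the cache windows that back firmware data regions. */
	for (i = 2; i <= 7; ++i)
		r->cw_enable[i] = false;

	/* Reset the mailbox pointers so a fresh load starts clean. */
	r->inbox1_rptr = 0;
	r->inbox1_wptr = 0;
}

int main(void)
{
	struct fake_regs r = { .enable = true, .cw_enable = { [2] = true, [6] = true } };

	halt_controller(&r);
	printf("enabled=%d soft_reset=%d cw6=%d\n", r.enable, r.soft_reset, r.cw_enable[6]);
	return 0;
}
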
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 29c1132951af..daf81027d663 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -155,6 +158,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -162,7 +167,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \
DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
- DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn32_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
@@ -254,7 +260,7 @@ void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index e5e77bd3c31e..e13557ed97be 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -88,23 +88,19 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn35_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
+ const uint32_t timeout = 100000;
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
@@ -113,7 +109,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
-
if (is_enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
@@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub)
LONO_SOCCLK_GATE_DISABLE, 1,
LONO_DMCUBCLK_GATE_DISABLE, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}
@@ -410,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
@@ -427,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -462,67 +454,112 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
+
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+}
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub)
+{
+ uint64_t region3_cw5_offset;
+ uint32_t top_addr, top_addr_enable, offset_low;
+ uint32_t offset_high, base_addr, fw_version;
+ bool is_vbios_fw = false;
+
+ memset(&dmub->preos_info, 0, sizeof(dmub->preos_info));
+
+ fw_version = REG_READ(DMCUB_SCRATCH1);
+ is_vbios_fw = ((fw_version >> 6) & 0x01) ? true : false;
+ if (!is_vbios_fw)
+ return false;
+
+ dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0);
+ dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1);
+ dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14);
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_ENABLE, &top_addr_enable);
+ if (top_addr_enable) {
+ dmub_dcn35_get_fb_base_offset(dmub,
+ &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset);
+ offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET);
+ offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH);
+ region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low;
+ dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset
+ - dmub->preos_info.fb_base + dmub->preos_info.fb_offset;
+
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr);
+ base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF;
+ dmub->preos_info.trace_buffer_size =
+ (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0;
+ }
+
+ return true;
}
+
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
/* DMCUB_REGION3_TMR_AXI_SPACE values:
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 686e97c00ccc..92e6695a2c9b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -269,7 +269,7 @@ void dmub_dcn35_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub);
@@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub);
void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN35_H_ */
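
dmub_dcn35_get_preos_fw_info() above derives the pre-OS trace buffer's physical address and size from the CW5 offset, base and top registers. The arithmetic in isolation, using made-up example values rather than real register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical example values; the driver reads these from CW5 registers. */
	uint32_t offset_low  = 0x00200000;
	uint32_t offset_high = 0x00000000;
	uint64_t fb_base     = 0x00100000;          /* FB base equivalent */
	uint64_t fb_offset   = 0x00000000;          /* FB offset equivalent */
	uint32_t top_addr    = 0x0020FFFF;
	uint32_t base_addr   = 0x00200000 & 0x1FFFFFFF;

	uint64_t region_offset = ((uint64_t)offset_high << 32) | offset_low;
	uint64_t phys_addr = region_offset - fb_base + fb_offset;
	uint32_t size = (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0;

	printf("trace buffer phys: 0x%llx, size: %u bytes\n",
	       (unsigned long long)phys_addr, size);
	return 0;
}
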
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c
new file mode 100644
index 000000000000..b1ce09d48920
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn36.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+#define CTX dmub
+#define REGS dmub->regs_dcn35
+#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+void dmub_srv_dcn36_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
+{
+ struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35;
+#define REG_STRUCT regs
+
+#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
+ DMUB_DCN35_REGS()
+ DMCUB_INTERNAL_REGS()
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+#undef REG_STRUCT
+}
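
The new dmub_dcn36.c reuses the DCN35 register lists through the DMUB_SR/DMUB_SF X-macros, redefining the macro body once per generated table. A small self-contained illustration of that X-macro pattern; MY_REGS(), MY_SR and the offsets below are invented for the example and are not the driver's macros:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register list; the real DMUB_DCN35_REGS()/DMUB_DCN35_FIELDS()
 * are much larger but follow the same shape. */
#define MY_REGS() \
	MY_SR(DMCUB_SCRATCH0) \
	MY_SR(DMCUB_SCRATCH1)

struct my_offsets {
#define MY_SR(reg) uint32_t reg;
	MY_REGS()
#undef MY_SR
};

/* Hypothetical per-register hardware offsets. */
#define reg_DMCUB_SCRATCH0_OFFSET 0x100
#define reg_DMCUB_SCRATCH1_OFFSET 0x104

static void init_offsets(struct my_offsets *o)
{
#define MY_SR(reg) o->reg = reg_##reg##_OFFSET;
	MY_REGS()
#undef MY_SR
}

int main(void)
{
	struct my_offsets o;

	init_offsets(&o);
	printf("SCRATCH0 @ 0x%x, SCRATCH1 @ 0x%x\n", o.DMCUB_SCRATCH0, o.DMCUB_SCRATCH1);
	return 0;
}
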
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h
new file mode 100644
index 000000000000..57850550f682
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef _DMUB_DCN36_H_
+#define _DMUB_DCN36_H_
+
+#include "dmub_dcn35.h"
+
+struct dmub_srv;
+
+void dmub_srv_dcn36_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+
+#endif /* _DMUB_DCN36_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 39a8cb6d7523..95542299e3b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -63,44 +63,58 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn401_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ for (; i < timeout_us; i++) {
+ scratch = REG_READ(DMCUB_SCRATCH7);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
- if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
break;
+
+ udelay(poll_delay_us);
}
+ }
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
+ }
+
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -131,7 +145,9 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -151,6 +167,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -161,7 +178,9 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -181,6 +200,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -402,72 +422,78 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
@@ -501,28 +527,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+	/* write remaining command dwords based on payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4) {
+ break;
+ }
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
@@ -540,30 +607,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
+}
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -588,11 +664,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
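
dmub_dcn401_send_reg_inbox0_cmd_msg() now writes only the MSG registers covered by the command's payload size and rings the RDY doorbell with dword 0 last. A standalone sketch of that payload-bounded write, with an array standing in for the MSG0..MSG14 registers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register file: MSG0..MSG14 plus the RDY doorbell. */
static uint32_t msg_regs[15];
static uint32_t rdy_reg;

/* Write only the dwords covered by the payload, then ring the doorbell last.
 * dwords[0] is the command header and goes to the RDY register. */
static void send_reg_inbox_cmd(const uint32_t *dwords, int32_t payload_bytes)
{
	uint32_t i;

	for (i = 0; i < 15; ++i) {
		if (payload_bytes <= (int32_t)(i * 4))
			break;
		msg_regs[i] = dwords[i + 1];
	}

	/* Writing RDY triggers the firmware-side inbox interrupt. */
	rdy_reg = dwords[0];
}

int main(void)
{
	uint32_t cmd[16] = { 0xC0DE0001, 0x11, 0x22, 0x33 };

	send_reg_inbox_cmd(cmd, 12); /* 12 payload bytes -> MSG0..MSG2 written */
	printf("MSG0=0x%x MSG2=0x%x MSG3=0x%x RDY=0x%x\n",
	       msg_regs[0], msg_regs[2], msg_regs[3], rdy_reg);
	return 0;
}
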
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index 4c8843b79695..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -169,7 +169,8 @@ struct dmub_srv;
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \
- DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN)
+ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn401_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
@@ -263,7 +264,7 @@ void dmub_dcn401_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn401_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
@@ -276,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
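
The header now exposes separate ack and clear helpers for the register inbox response interrupt. A rough host-side handshake using that split, with booleans standing in for the HOST_INTERRUPT_CSR fields (assumed usage, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical interrupt CSR model. */
static bool rsp_int_status = true;   /* pretend the firmware already responded */
static bool rsp_int_ack;

static bool handle_response(void)
{
	if (!rsp_int_status)
		return false;

	/* 1. acknowledge the response interrupt */
	rsp_int_ack = true;
	/* ... read the response registers here ... */
	/* 2. clear the ack as a separate step, mirroring the split helpers */
	rsp_int_ack = false;

	return true;
}

int main(void)
{
	printf("handled: %d, ack left cleared: %d\n", handle_response(), !rsp_int_ack);
	return 0;
}
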
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 15ea216e903d..a6ae1d2e9685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -38,6 +38,7 @@
#include "dmub_dcn32.h"
#include "dmub_dcn35.h"
#include "dmub_dcn351.h"
+#include "dmub_dcn36.h"
#include "dmub_dcn401.h"
#include "os_types.h"
/*
@@ -64,6 +65,12 @@
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
+/* Default indirect buffer size. */
+#define DMUB_IB_MEM_SIZE (2560)
+
+/* Default LSDMA ring buffer size. */
+#define DMUB_LSDMA_RB_SIZE (64 * 1024)
+
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
@@ -156,6 +163,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+	/* default to specifying no inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -314,6 +324,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN35:
case DMUB_ASIC_DCN351:
+ case DMUB_ASIC_DCN36:
dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
@@ -348,10 +359,13 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn35_get_current_time;
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
+ funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
- funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
+ funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
+ if (asic == DMUB_ASIC_DCN36)
+ funcs->init_reg_offsets = dmub_srv_dcn36_regs_init;
funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
funcs->should_detect = dmub_dcn35_should_detect;
@@ -391,10 +405,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -407,6 +426,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+	/* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
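
The new inbox selection in dmub_srv_hw_setup() honours an explicit override first, then the per-ASIC default, then prefers the register inbox when the hook exists, and finally falls back to the frame-buffer inbox. The same decision chain as a tiny standalone function; the enum names are shortened stand-ins for the dmub_inbox_cmd_interface_type values:

#include <stdio.h>

/* Hypothetical mirror of the selection enum. */
enum inbox_type { INBOX_DEFAULT, INBOX_FB, INBOX_REG };

static enum inbox_type pick_inbox_type(enum inbox_type requested,
				       enum inbox_type asic_default,
				       int has_reg_inbox)
{
	if (requested != INBOX_DEFAULT)
		return requested;            /* explicit override wins */
	if (asic_default != INBOX_DEFAULT)
		return asic_default;         /* per-ASIC default next */
	if (has_reg_inbox)
		return INBOX_REG;            /* prefer the register inbox if wired up */
	return INBOX_FB;                     /* frame-buffer inbox as the fallback */
}

int main(void)
{
	printf("%d\n", pick_inbox_type(INBOX_DEFAULT, INBOX_FB, 1)); /* -> INBOX_FB (1) */
	return 0;
}
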
@@ -422,6 +455,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -531,8 +565,11 @@ enum dmub_status
window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
- window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64);
+ window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
+ window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
+ window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64);
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -617,20 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
- struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
+ uint32_t i;
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
- ASSERT(0);
- return DMUB_STATUS_INVALID;
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (!params->fb[i]) {
+ ASSERT(0);
+ return DMUB_STATUS_INVALID;
+ }
}
dmub->fb_base = params->fb_base;
@@ -691,7 +730,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -704,7 +743,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
@@ -712,7 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->shared_state = shared_state_fb->cpu_addr;
- dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM];
+
+ dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD];
+ dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr;
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
@@ -733,7 +776,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -764,27 +807,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -795,8 +817,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -807,7 +834,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -816,18 +843,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -842,13 +871,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -906,26 +935,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, falling back to submitted vs reported command counts */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
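+/*
+ * Note the split: dmub_srv_wait_for_pending() above waits only for the work
+ * outstanding at the time of the call (snapshotting the submitted counts),
+ * while dmub_srv_wait_for_idle() below keeps polling until both inboxes are
+ * fully drained.
+ */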
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i, rptr;
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
- if (rptr > dmub->inbox1_rb.capacity)
- return DMUB_STATUS_HW_FAILURE;
-
- dmub->inbox1_rb.rptr = rptr;
+ if (status != DMUB_STATUS_OK)
+ return status;
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
@@ -1036,35 +1123,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1095,11 +1153,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
+ if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
return false;
- dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
+ dmub->hw_funcs.get_diagnostic_data(dmub);
return true;
}
@@ -1156,46 +1214,170 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
+{
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
+
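+/*
+ * Rough caller flow (sketch, not the only valid sequence): submit with
+ * dmub_srv_fb_cmd_queue()/dmub_srv_fb_cmd_execute() or
+ * dmub_srv_reg_cmd_execute(), wait with dmub_srv_wait_for_idle(), then pull
+ * the reply with dmub_srv_cmd_get_response(), which reads from whichever
+ * inbox the service was configured to use.
+ */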
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+ return DMUB_STATUS_OK;
+}
+
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required)
+{
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
+
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ /* check for space in inbox1 */
+ if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+ uint32_t rptr;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
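+ /* num_reported counts fully consumed commands; the ring distance from the
+ * cached rptr to the new hardware rptr (across the wrap point if needed),
+ * divided by DMUB_RB_CMD_SIZE, gives how many commands completed since the
+ * last update. */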
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info)
+ return false;
+
+ return dmub->hw_funcs.get_preos_fw_info(dmub);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index cce887cefc01..e7a58b140388 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
switch (cmd.cmd_common.header.type) {
case DMUB_OUT_CMD__DP_AUX_REPLY:
notify->type = DMUB_NOTIFICATION_AUX_REPLY;
- notify->link_index = cmd.dp_aux_reply.control.instance;
+ notify->instance = cmd.dp_aux_reply.control.instance;
notify->result = cmd.dp_aux_reply.control.result;
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
@@ -84,34 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->type = DMUB_NOTIFICATION_HPD_IRQ;
}
- notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->instance = cmd.dp_hpd_notify.hpd_data.instance;
notify->result = AUX_RET_SUCCESS;
break;
case DMUB_OUT_CMD__SET_CONFIG_REPLY:
notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
- notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->instance = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
- notify->link_index = cmd.dpia_notification.payload.header.instance;
-
- if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
-
- notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
- notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
-
- if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
- notify->result = DPIA_BW_REQ_FAILED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
- notify->result = DPIA_BW_REQ_SUCCESS;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
- notify->result = DPIA_EST_BW_CHANGED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
- notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
- }
+ notify->instance = cmd.dpia_notification.payload.header.instance;
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
@@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
&cmd.hpd_sense_notify.data,
sizeof(cmd.hpd_sense_notify.data));
break;
+ case DMUB_OUT_CMD__FUSED_IO:
+ notify->type = DMUB_NOTIFICATION_FUSED_IO;
+ dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 812377d9e48f..973b6bdbac63 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -135,12 +135,8 @@ struct bp_external_encoder_control {
struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
- /* from GPU Tx aka asic_signal */
- enum signal_type signal;
- /* sink_signal may differ from asicSignal if Translator encoder */
enum signal_type sink_signal;
- enum display_output_bit_depth display_output_bit_depth;
- bool enable_dp_audio;
+ uint8_t bit_depth;
};
struct bp_transmitter_control {
@@ -166,6 +162,11 @@ struct bp_transmitter_control {
bool single_pll_mode;
};
+struct bp_load_detection_parameters {
+ enum engine_id engine_id;
+ uint16_t device_id;
+};
+
struct bp_hw_crtc_timing_parameters {
enum controller_id controller_id;
/* horizontal part */
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 090230d29df8..8aea50aa9533 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -213,6 +213,11 @@ enum {
#endif
#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define DEVICE_ID_NV_143F 0x143F
+#define DEVICE_ID_NV_13F9 0x13F9
+#define DEVICE_ID_NV_13FA 0x13FA
+#define DEVICE_ID_NV_13FB 0x13FB
+#define DEVICE_ID_NV_13FC 0x13FC
+#define DEVICE_ID_NV_13DB 0x13DB
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define DEVICE_ID_VGH_1435 0x1435
@@ -257,6 +262,7 @@ enum {
#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_DCN36(eChipRev) ((eChipRev) >= 0x50 && (eChipRev) < 0xC0)
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 654387cf057f..a021d12acd74 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -63,6 +63,7 @@ enum dce_version {
DCN_VERSION_3_21,
DCN_VERSION_3_5,
DCN_VERSION_3_51,
+ DCN_VERSION_3_6,
DCN_VERSION_4_01,
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index de8f3cfed6c8..07b937b92efc 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -30,6 +30,22 @@
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
#define DP_SINK_HW_REVISION_START 0x409
#endif
+/* Panel Replay */
+#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0
+#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */
+#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY 0x0b1
+#endif /* DP_PANEL_REPLAY_CAPABILITY */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */
+#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE (1 << 0)
+#endif /* DP_PANEL_REPLAY_ENABLE */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */
enum dpcd_revision {
DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 7e3240e73c1f..63813009a3a6 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source(
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq);
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq);
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config);
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 813463ffe15c..38a77fa9b4af 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -169,6 +169,7 @@ struct dc_firmware_info {
uint32_t engine_clk_ss_percentage;
} feature;
+ uint32_t max_pixel_clock; /* in KHz */
uint32_t default_display_engine_pll_frequency; /* in KHz */
uint32_t external_clock_source_frequency_for_dp; /* in KHz */
uint32_t smu_gpu_pll_output_freq; /* in KHz */
@@ -424,7 +425,7 @@ struct integrated_info {
/*
* DFS-bypass flag
*/
-/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+/* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */
enum {
DFS_BYPASS_ENABLE = 0x10
};
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 54e33062b3c0..1386fa124e85 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -310,4 +310,11 @@ static inline bool dal_graphics_object_id_equal(
}
return false;
}
+
+static inline bool dc_connector_supports_analog(const enum connector_id conn)
+{
+ return conn == CONNECTOR_ID_VGA ||
+ conn == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ conn == CONNECTOR_ID_DUAL_LINK_DVII;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1867aac57cf2..da74ed66c8f9 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -89,6 +89,8 @@ struct link_training_settings {
bool enhanced_framing;
enum lttpr_mode lttpr_mode;
+ bool lttpr_early_tps2;
+
/* disallow different lanes to have different lane settings */
bool disallow_per_lane_settings;
/* dpcd lane settings will always use the same hw lane settings
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 058f882d5bdd..4c01514b926c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,11 +40,6 @@ struct dc_state;
*
*/
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count);
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 4d68c1c6e210..177acb0574f1 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -32,6 +32,7 @@
#define DC_LOG_WARNING(...) drm_warn((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DEBUG(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DC(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
+#define DC_LOG_INFO(...) drm_info((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
#define DC_LOG_HW_HOTPLUG(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index a10d6b988aab..3a2c2d2fb629 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+/**
+ * dc_is_rgb_signal() - Whether the signal is analog RGB.
+ *
+ * Returns whether the given signal type is an analog RGB signal
+ * that is used with a DAC on VGA or DVI-I connectors.
+ * Not to be confused with other uses of "RGB", such as RGB color space.
+ */
+static inline bool dc_is_rgb_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_RGB);
+}
+
static inline bool dc_is_tmds_signal(enum signal_type signal)
{
switch (signal) {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 2b3964529539..1aae46d703ba 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) {
/* When the target refresh rate is the minimum panel refresh rate,
* round down the vtotal value to avoid stretching vblank over
* panel's vtotal boundary.
@@ -155,6 +155,14 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000000);
+ } else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) {
+ /* When the target refresh rate is the maximum panel refresh rate,
+ * round up the vtotal value to prevent an off-by-one error causing
+ * v_total_min to fall below the panel's lower bound
+ */
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total) + (1000000 - 1), 1000000);
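+ /* adding (1000000 - 1) before the final divide turns truncation into a
+ * ceiling division, so v_total cannot come out one line short */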
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
@@ -218,8 +226,8 @@ static void update_v_total_for_static_ramp(
unsigned int target_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
- bool ramp_direction_is_up = (current_duration_in_us >
- target_duration_in_us) ? true : false;
+ bool ramp_direction_is_up = current_duration_in_us >
+ target_duration_in_us;
/* Calculate ratio between new and current frame duration with 3 digit */
unsigned int frame_duration_ratio = div64_u64(1000000,
@@ -552,43 +560,6 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
return false;
}
-bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
- const struct dc_stream_state *stream,
- unsigned int *vmin,
- unsigned int *vmax)
-{
- *vmin = stream->adjust.v_total_min;
- *vmax = stream->adjust.v_total_max;
-
- return true;
-}
-
-bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
- struct dc_stream_state *stream,
- unsigned int *nom_v_pos,
- unsigned int *v_pos)
-{
- struct core_freesync *core_freesync = NULL;
- struct crtc_position position;
-
- if (mod_freesync == NULL)
- return false;
-
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
- if (dc_stream_get_crtc_position(core_freesync->dc, &stream, 1,
- &position.vertical_count,
- &position.nominal_vcount)) {
-
- *nom_v_pos = position.nominal_vcount;
- *v_pos = position.vertical_count;
-
- return true;
- }
-
- return false;
-}
-
static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
@@ -1289,27 +1260,16 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
-}
-void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
- const struct mod_vrr_params *vrr,
- unsigned int *v_total_min, unsigned int *v_total_max,
- unsigned int *event_triggers,
- unsigned int *window_min, unsigned int *window_max,
- unsigned int *lfc_mid_point_in_us,
- unsigned int *inserted_frames,
- unsigned int *inserted_duration_in_us)
-{
- if (mod_freesync == NULL)
+ /*
+ * If VRR is inactive, set vtotal min and max to nominal vtotal
+ */
+ if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(stream,
+ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
return;
-
- if (vrr->supported) {
- *v_total_min = vrr->adjust.v_total_min;
- *v_total_max = vrr->adjust.v_total_max;
- *event_triggers = 0;
- *lfc_mid_point_in_us = vrr->btr.mid_point_in_us;
- *inserted_frames = vrr->btr.frames_to_insert;
- *inserted_duration_in_us = vrr->btr.inserted_duration_in_us;
}
}
@@ -1328,85 +1288,7 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
return nominal_field_rate_in_uhz;
}
-unsigned long long mod_freesync_calc_field_rate_from_timing(
- unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
-{
- unsigned long long field_rate_in_uhz = 0;
- unsigned int total = htotal * vtotal;
-
- /* Calculate nominal field rate for stream, rounded up to nearest integer */
- field_rate_in_uhz = pix_clk;
- field_rate_in_uhz *= 1000000ULL;
-
- field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
-
- return field_rate_in_uhz;
-}
-
bool mod_freesync_get_freesync_enabled(struct mod_vrr_params *pVrr)
{
return (pVrr->state != VRR_STATE_UNSUPPORTED) && (pVrr->state != VRR_STATE_DISABLED);
}
-
-bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
- uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz)
-{
-
- /* Typically nominal refresh calculated can have some fractional part.
- * Allow for some rounding error of actual video timing by taking floor
- * of caps and request. Round the nominal refresh rate.
- *
- * Dividing will convert everything to units in Hz although input
- * variable name is in uHz!
- *
- * Also note, this takes care of rounding error on the nominal refresh
- * so by rounding error we only expect it to be off by a small amount,
- * such as < 0.1 Hz. i.e. 143.9xxx or 144.1xxx.
- *
- * Example 1. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 143.5x Hz rounded to 144 Hz
- * This function should allow this as valid request
- *
- * Example 2. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 144.4x Hz rounded to 144 Hz
- * This function should allow this as valid request
- *
- * Example 3. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 120.xx Hz rounded to 120 Hz
- * This function should return NOT valid since the requested
- * max is greater than current timing's nominal
- *
- * Example 4. Caps Min = 40 Hz, Max = 120 Hz
- * Request Min = 40 Hz, Max = 120 Hz
- * Nominal = 144.xx Hz rounded to 144 Hz
- * This function should return NOT valid since the nominal
- * is greater than the capability's max refresh
- */
- nominal_field_rate_in_uhz =
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
- min_refresh_cap_in_uhz /= 1000000;
- max_refresh_cap_in_uhz /= 1000000;
-
- /* Check nominal is within range */
- if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
- nominal_field_rate_in_uhz < min_refresh_cap_in_uhz)
- return false;
-
- /* If nominal is less than max, limit the max allowed refresh rate */
- if (nominal_field_rate_in_uhz < max_refresh_cap_in_uhz)
- max_refresh_cap_in_uhz = nominal_field_rate_in_uhz;
-
- /* Check min is within range */
- if (min_refresh_cap_in_uhz > max_refresh_cap_in_uhz)
- return false;
-
- /* For variable range, check for at least 10 Hz range */
- if (nominal_field_rate_in_uhz - min_refresh_cap_in_uhz < 10)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 5e01c6e24cbc..ca402ddcdacc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -29,6 +29,7 @@ static void push_error_status(struct mod_hdcp *hdcp,
enum mod_hdcp_status status)
{
struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+ const uint8_t retry_limit = hdcp->connection.link.adjust.retry_limit;
if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
trace->errors[trace->error_count].status = status;
@@ -39,11 +40,11 @@ static void push_error_status(struct mod_hdcp *hdcp,
if (is_hdcp1(hdcp)) {
hdcp->connection.hdcp1_retry_count++;
- if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp1_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp1.disable = 1;
} else if (is_hdcp2(hdcp)) {
hdcp->connection.hdcp2_retry_count++;
- if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp2_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp2.disable = 1;
}
}
@@ -353,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* reset retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* add display to connection */
@@ -399,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* remove display */
@@ -463,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* set new adjustment */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 55c7d873175f..26a351a184f3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -88,6 +88,7 @@ struct mod_hdcp_transition_input_hdcp2 {
uint8_t lc_init_write;
uint8_t l_prime_available_poll;
uint8_t l_prime_read;
+ uint8_t l_prime_combo_read;
uint8_t l_prime_validation;
uint8_t eks_prepare;
uint8_t eks_write;
@@ -386,6 +387,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
@@ -507,7 +509,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
output->auth_complete = 1;
- mod_hdcp_log_ddc_trace(hdcp);
+ HDCP_AUTH_COMPLETE_TRACE(hdcp);
}
/* connection topology helpers */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 8bc377560787..1bbd728d4345 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ enum mod_hdcp_status status;
u8 bksv[sizeof(n)] = { };
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
@@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
count++;
n &= (n - 1);
}
- return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+
+ if (count == 20) {
+ hdcp->connection.trace.hdcp1.attempt_count++;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+ }
+ return status;
}
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
@@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp);
+
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 1d41dd58f6bc..27500abf9fee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
if (is_dp_hdcp(hdcp))
status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
@@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
- status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
- MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi
+ & HDCP_2_2_HDMI_SUPPORT_MASK)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ trace->hdcp2.attempt_count++;
+
return status;
}
@@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
/* Avoid device count == 0 to do authentication */
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ trace->hdcp2.downstream_device_count = get_device_count(hdcp);
+ trace->hdcp2.hdcp1_device_downstream =
+ HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
+ trace->hdcp2.hdcp2_legacy_device_downstream =
+ HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
@@ -467,21 +480,30 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
&input->lc_init_prepare, &status,
hdcp, "lc_init_prepare"))
goto out;
- if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
- &input->lc_init_write, &status,
- hdcp, "lc_init_write"))
- goto out;
- if (is_dp_hdcp(hdcp))
- msleep(16);
- else
- if (!mod_hdcp_execute_and_set(poll_l_prime_available,
- &input->l_prime_available_poll, &status,
- hdcp, "l_prime_available_poll"))
+
+ if (hdcp->connection.link.adjust.hdcp2.use_fw_locality_check) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_combo_read, &status,
+ hdcp, "l_prime_combo_read"))
goto out;
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
- &input->l_prime_read, &status,
- hdcp, "l_prime_read"))
- goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+ &input->lc_init_write, &status,
+ hdcp, "lc_init_write"))
+ goto out;
+ if (is_dp_hdcp(hdcp))
+ msleep(16);
+ else
+ if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+ &input->l_prime_available_poll, &status,
+ hdcp, "l_prime_available_poll"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+ }
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index c5f6c11de7e5..9316312a4df5 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -185,19 +185,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
case H2_A2_LOCALITY_CHECK:
+ /* 1A-05: consider disconnection after LC init a failure */
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_available_poll != PASS ||
- input->l_prime_read != PASS) {
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ input->lc_init_prepare != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ /* 1A-13-2: consider l' timeout a failure */
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_available_poll != PASS ||
+ input->l_prime_read != PASS)) {
+ /* 1A-13-2: consider l' timeout a failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-13-1: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
@@ -500,13 +513,27 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
break;
case D2_A2_LOCALITY_CHECK:
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_read != PASS) {
- /* 1A-12: consider invalid l' a failure */
+ input->lc_init_prepare != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_read != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-12: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 6e064e6ae949..0ca39873f807 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
+
+static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+
+ struct mod_hdcp_atomic_op_aux write = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT],
+ hdcp2->lc_init + 1,
+ sizeof(hdcp2->lc_init) - 1,
+ };
+ struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, };
+ struct mod_hdcp_atomic_op_aux read = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime + 1,
+ sizeof(hdcp2->lc_l_prime) - 1,
+ };
+
+ hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_aux(
+ hdcp->config.ddc.handle,
+ &write,
+ &stall,
+ &read,
+ 16 * 1000,
+ 0
+ );
+}
+
+static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+ uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) };
+
+ hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT];
+ memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init));
+
+ struct mod_hdcp_atomic_op_i2c write = {
+ HDCP_I2C_ADDR,
+ 0,
+ hdcp->buf,
+ sizeof(hdcp2->lc_init) + 1,
+ };
+ struct mod_hdcp_atomic_op_i2c poll = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS],
+ expected_rxstatus,
+ sizeof(expected_rxstatus),
+ };
+ struct mod_hdcp_atomic_op_i2c read = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime,
+ sizeof(hdcp2->lc_l_prime),
+ };
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c(
+ hdcp->config.ddc.handle,
+ &write,
+ &poll,
+ &read,
+ 20 * 1000,
+ 6
+ );
+}
+
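+/*
+ * Dispatcher for the firmware-assisted locality check: DP links use the AUX
+ * write/stall/read helper, HDMI uses the I2C write/poll/read helper. Folding
+ * the whole LC_Init -> L' round trip into one DDC call is presumably what
+ * lets it fit inside the HDCP locality-check time window.
+ */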
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
+{
+ const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE;
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 6b3b5f610907..5cb979c2cf8c 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -125,129 +125,11 @@ void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
}
}
+#define CASE_FORMAT(entry) case entry: return #entry;
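+/*
+ * X-macro: MOD_HDCP_STATUS_LIST(CASE_FORMAT) below expands to one
+ * "case MOD_HDCP_STATUS_xxx: return "MOD_HDCP_STATUS_xxx";" label per entry,
+ * so the status-to-string table stays in sync with the status list by
+ * construction.
+ */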
char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
- case MOD_HDCP_STATUS_SUCCESS:
- return "MOD_HDCP_STATUS_SUCCESS";
- case MOD_HDCP_STATUS_FAILURE:
- return "MOD_HDCP_STATUS_FAILURE";
- case MOD_HDCP_STATUS_RESET_NEEDED:
- return "MOD_HDCP_STATUS_RESET_NEEDED";
- case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
- return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
- case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
- return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
- case MOD_HDCP_STATUS_INVALID_STATE:
- return "MOD_HDCP_STATUS_INVALID_STATE";
- case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
- return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
- case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
- return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
- case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
- return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
- case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
- return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
- case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
- return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
- case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
- return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
- case MOD_HDCP_STATUS_DDC_FAILURE:
- return "MOD_HDCP_STATUS_DDC_FAILURE";
- case MOD_HDCP_STATUS_INVALID_OPERATION:
- return "MOD_HDCP_STATUS_INVALID_OPERATION";
- case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
- case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE:
- return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE";
+ MOD_HDCP_STATUS_LIST(CASE_FORMAT)
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 1d83c1b9da10..26553aa4c5ca 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -31,6 +31,7 @@
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#define HDCP_LOG_TRA(hdcp) do {} while (0)
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
@@ -131,4 +132,9 @@
HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
} while (0)
+#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \
+ mod_hdcp_log_ddc_trace(hdcp); \
+ HDCP_LOG_TRA(hdcp); \
+} while (0)
+
#endif // MOD_HDCP_LOG_H_
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..6b7db8ec9a53 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
@@ -368,6 +371,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index cc3dc9b589f6..57916ed98c86 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -110,25 +110,6 @@ struct mod_vrr_params {
struct mod_freesync *mod_freesync_create(struct dc *dc);
void mod_freesync_destroy(struct mod_freesync *mod_freesync);
-bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
- const struct dc_stream_state *stream,
- unsigned int *vmin,
- unsigned int *vmax);
-
-bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
- struct dc_stream_state *stream,
- unsigned int *nom_v_pos,
- unsigned int *v_pos);
-
-void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
- const struct mod_vrr_params *vrr,
- unsigned int *v_total_min, unsigned int *v_total_max,
- unsigned int *event_triggers,
- unsigned int *window_min, unsigned int *window_max,
- unsigned int *lfc_mid_point_in_us,
- unsigned int *inserted_frames,
- unsigned int *inserted_duration_in_us);
-
void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
@@ -155,13 +136,6 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream);
-unsigned long long mod_freesync_calc_field_rate_from_timing(
- unsigned int vtotal, unsigned int htotal, unsigned int pix_clk);
-
-bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
- uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz);
-
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a4d344a4db9e..835467225458 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -35,69 +35,74 @@ struct mod_hdcp;
#define MAX_NUM_OF_DISPLAYS 6
#define MAX_NUM_OF_ATTEMPTS 4
#define MAX_NUM_OF_ERROR_TRACE 10
+#define MOD_HDCP_STATUS_LIST(FORMAT) \
+ FORMAT(MOD_HDCP_STATUS_SUCCESS) \
+ FORMAT(MOD_HDCP_STATUS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_RESET_NEEDED) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_NOT_FOUND) \
+ FORMAT(MOD_HDCP_STATUS_INVALID_STATE) \
+ FORMAT(MOD_HDCP_STATUS_NOT_IMPLEMENTED) \
+ FORMAT(MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_INVALID_BKSV) \
+ FORMAT(MOD_HDCP_STATUS_DDC_FAILURE) /* TODO: specific errors */ \
+ FORMAT(MOD_HDCP_STATUS_INVALID_OPERATION) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE)
+
+#define ENUM_FORMAT(entry) entry,
/* detailed return status */
enum mod_hdcp_status {
- MOD_HDCP_STATUS_SUCCESS = 0,
- MOD_HDCP_STATUS_FAILURE,
- MOD_HDCP_STATUS_RESET_NEEDED,
- MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
- MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
- MOD_HDCP_STATUS_INVALID_STATE,
- MOD_HDCP_STATUS_NOT_IMPLEMENTED,
- MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
- MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
- MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
- MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
- MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
- MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
- MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
- MOD_HDCP_STATUS_INVALID_OPERATION,
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
- MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
- MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
- MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE,
+ MOD_HDCP_STATUS_LIST(ENUM_FORMAT)
};
struct mod_hdcp_displayport {
@@ -133,9 +138,22 @@ enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
};
+struct mod_hdcp_atomic_op_i2c {
+ uint8_t address;
+ uint8_t offset;
+ uint8_t *data;
+ uint32_t size;
+};
+
+struct mod_hdcp_atomic_op_aux {
+ uint32_t address;
+ uint8_t *data;
+ uint32_t size;
+};
+
struct mod_hdcp_ddc {
void *handle;
- struct {
+ struct mod_hdcp_ddc_funcs {
bool (*read_i2c)(void *handle,
uint32_t address,
uint8_t offset,
@@ -153,6 +171,22 @@ struct mod_hdcp_ddc {
uint32_t address,
const uint8_t *data,
uint32_t size);
+ bool (*atomic_write_poll_read_i2c)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
+ bool (*atomic_write_poll_read_aux)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
} funcs;
};
@@ -185,11 +219,14 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 3;
+ uint8_t use_fw_locality_check : 1;
+ uint8_t use_sw_locality_fallback: 1;
+ uint8_t reserved : 1;
};
struct mod_hdcp_link_adjustment {
uint8_t auth_delay;
+ uint8_t retry_limit;
struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
};
@@ -199,9 +236,23 @@ struct mod_hdcp_error {
uint8_t state_id;
};
+struct mod_hdcp1_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+};
+
+struct mod_hdcp2_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+ uint8_t hdcp1_device_downstream;
+ uint8_t hdcp2_legacy_device_downstream;
+};
+
struct mod_hdcp_trace {
struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
uint8_t error_count;
+ struct mod_hdcp1_trace hdcp1;
+ struct mod_hdcp2_trace hdcp2;
};
enum mod_hdcp_encryption_status {
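Editor's note on the mod_hdcp.h change above: the status codes are rewritten as an X-macro list. MOD_HDCP_STATUS_LIST() takes a FORMAT callback, and ENUM_FORMAT expands each entry back into exactly the enumerator it replaces, so the enum itself is unchanged. The payoff of the list form is that other expansions can be generated from the same single source; for example, a name-lookup helper could plausibly be built as sketched below (STR_FORMAT and the helper are illustrative, not part of this hunk):

/* sketch: a second expansion of the same list as a to-string helper
 * (assumes the MOD_HDCP_STATUS_LIST/enum definitions from mod_hdcp.h above) */
#include "mod_hdcp.h"

#define STR_FORMAT(entry) case entry: return #entry;

static const char *mod_hdcp_status_to_str(enum mod_hdcp_status status)
{
	switch (status) {
	MOD_HDCP_STATUS_LIST(STR_FORMAT)
	default:
		return "UNKNOWN_STATUS";
	}
}
#undef STR_FORMAT

Adding a new status then touches one list entry instead of two parallel tables that can drift apart.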
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index a344e2e49b0e..b3d55cac3569 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -383,10 +383,10 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
colorimetryFormat = ColorimetryYCC_DP_ITU709;
else if (cs == COLOR_SPACE_ADOBERGB)
colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
- else if (cs == COLOR_SPACE_2020_YCBCR)
+ else if (cs == COLOR_SPACE_2020_YCBCR_LIMITED)
colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
- if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
+ if (cs == COLOR_SPACE_2020_YCBCR_LIMITED && tf == TRANSFER_FUNC_GAMMA_22)
colorimetryFormat = ColorimetryYCC_DP_ITU709;
break;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 95838c7ab054..fd139b219bf9 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,6 +975,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t coasting_vtotal_refresh_rate_mhz,
+ uint32_t flicker_free_refresh_rate_mhz,
+ bool is_defer)
+{
+ uint32_t *frame_skip_number_array = NULL;
+ uint32_t frame_skip_number = 0;
+
+ if (link == NULL || flicker_free_refresh_rate_mhz == 0 || coasting_vtotal_refresh_rate_mhz == 0)
+ return;
+
+ if (is_defer)
+ frame_skip_number_array = link->replay_settings.defer_frame_skip_number_table;
+ else
+ frame_skip_number_array = link->replay_settings.frame_skip_number_table;
+
+ if (frame_skip_number_array == NULL)
+ return;
+
+ frame_skip_number = coasting_vtotal_refresh_rate_mhz / flicker_free_refresh_rate_mhz;
+
+ if (frame_skip_number >= 1)
+ frame_skip_number_array[type] = frame_skip_number - 1;
+ else
+ frame_skip_number_array[type] = 0;
+}
+
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal)
@@ -987,6 +1015,8 @@ void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
{
link->replay_settings.coasting_vtotal_table[type] =
link->replay_settings.defer_update_coasting_vtotal_table[type];
+ link->replay_settings.frame_skip_number_table[type] =
+ link->replay_settings.defer_frame_skip_number_table[type];
}
void set_replay_coasting_vtotal(struct dc_link *link,
@@ -996,9 +1026,9 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
{
- link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+ link->replay_settings.low_rr_full_screen_video_pseudo_vtotal = vtotal;
}
void calculate_replay_link_off_frame_count(struct dc_link *link,
@@ -1007,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link,
uint8_t max_link_off_frame_count = 0;
uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+ if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY)
+ return;
+
max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
@@ -1039,3 +1072,8 @@ bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_back
memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
return true;
}
+
+void reset_replay_dsync_error_count(struct dc_link *link)
+{
+ link->replay_settings.replay_desync_error_fail_count = 0;
+}
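Editor's note on set_replay_frame_skip_number() above: the skip count is the ratio of the coasting-vtotal refresh rate to the flicker-free refresh rate, minus one for the frame that is actually displayed; for instance, a ratio of 120 to 30 (in whatever common unit the caller passes) gives 4, so three frames are skipped. A standalone sketch of the same arithmetic with the patch's zero-rate guards (names simplified, not the driver's):

#include <stdint.h>

/* both rates must be in the same unit, exactly as in the patch */
static uint32_t replay_frame_skip_number(uint32_t coasting_rate,
					 uint32_t flicker_free_rate)
{
	uint32_t ratio;

	if (coasting_rate == 0 || flicker_free_rate == 0)
		return 0;

	ratio = coasting_rate / flicker_free_rate;

	/* one frame is displayed; only the remainder can be skipped */
	return ratio >= 1 ? ratio - 1 : 0;
}

/* example: 120 / 30 = 4  ->  3 frames skipped between refreshes */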
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index cac302e8fa10..87d31d9dce5a 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -60,9 +60,14 @@ void set_replay_coasting_vtotal(struct dc_link *link,
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal);
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t coasting_vtotal_refresh_rate_Mhz,
+ uint32_t flicker_free_refresh_rate_Mhz,
+ bool is_defer);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
@@ -78,4 +83,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
+void reset_replay_dsync_error_count(struct dc_link *link);
+void change_replay_to_psr(struct dc_link *link);
+void change_psr_to_replay(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 2d089d30518f..06badbf0c5b9 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -61,7 +61,7 @@ struct atif_qbtc_arguments {
struct atif_qbtc_data_point {
u8 luminance; /* luminance in percent */
- u8 ipnut_signal; /* input signal in range 0-255 */
+ u8 input_signal; /* input signal in range 0-255 */
} __packed;
struct atif_qbtc_output {
@@ -75,6 +75,8 @@ struct atif_qbtc_output {
u8 number_of_points; /* number of data points */
struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
} __packed;
+static_assert(ATIF_QBTC_MAX_DATA_POINTS == MAX_LUMINANCE_DATA_POINTS);
+static_assert(sizeof(struct atif_qbtc_data_point) == sizeof(struct amdgpu_dm_luminance_data));
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
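Editor's note on the amd_acpi.h hunk above: besides fixing the "ipnut_signal" typo, the two static_assert() lines pin the ACPI-facing layout to the DM-side one, since the brightness-curve points are copied between the two structures as raw bytes. The guard pattern in isolation looks like the sketch below (the struct names are placeholders, not the driver's):

#include <stdint.h>
#include <string.h>
#include <assert.h>	/* static_assert, C11 and later */

struct fw_point {		/* layout dictated by the firmware/ACPI side */
	uint8_t luminance;
	uint8_t input_signal;
} __attribute__((packed));

struct drv_point {		/* driver-side mirror of the same data */
	uint8_t luminance;
	uint8_t input_signal;
} __attribute__((packed));

/* fail the build, not the user, if the two definitions ever diverge */
static_assert(sizeof(struct fw_point) == sizeof(struct drv_point),
	      "firmware and driver brightness points must stay layout-compatible");

static void copy_points(struct drv_point *dst, const struct fw_point *src,
			size_t count)
{
	/* the static_assert above is what makes this raw copy safe */
	memcpy(dst, src, count * sizeof(*src));
}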
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
new file mode 100644
index 000000000000..a252ee4c7874
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMD_CPER_H__
+#define __AMD_CPER_H__
+
+#include <linux/uuid.h>
+
+#define CPER_HDR_REV_1 (0x100)
+#define CPER_SEC_MINOR_REV_1 (0x01)
+#define CPER_SEC_MAJOR_REV_22 (0x22)
+#define CPER_MAX_OAM_COUNT (8)
+
+#define CPER_CTX_TYPE_CRASH (1)
+#define CPER_CTX_TYPE_BOOT (9)
+
+#define CPER_CREATOR_ID_AMDGPU "amdgpu"
+
+#define CPER_NOTIFY_MCE \
+ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY_CMC \
+ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT_TYPE \
+ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define AMD_CRASHDUMP \
+ GUID_INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define AMD_GPU_NONSTANDARD_ERROR \
+ GUID_INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR_SECTION_TYPE \
+ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum cper_error_severity {
+ CPER_SEV_NON_FATAL_UNCORRECTED = 0,
+ CPER_SEV_FATAL = 1,
+ CPER_SEV_NON_FATAL_CORRECTED = 2,
+ CPER_SEV_NUM = 3,
+
+ CPER_SEV_UNUSED = 10,
+};
+
+enum cper_aca_reg {
+ CPER_ACA_REG_CTL_LO = 0,
+ CPER_ACA_REG_CTL_HI = 1,
+ CPER_ACA_REG_STATUS_LO = 2,
+ CPER_ACA_REG_STATUS_HI = 3,
+ CPER_ACA_REG_ADDR_LO = 4,
+ CPER_ACA_REG_ADDR_HI = 5,
+ CPER_ACA_REG_MISC0_LO = 6,
+ CPER_ACA_REG_MISC0_HI = 7,
+ CPER_ACA_REG_CONFIG_LO = 8,
+ CPER_ACA_REG_CONFIG_HI = 9,
+ CPER_ACA_REG_IPID_LO = 10,
+ CPER_ACA_REG_IPID_HI = 11,
+ CPER_ACA_REG_SYND_LO = 12,
+ CPER_ACA_REG_SYND_HI = 13,
+
+ CPER_ACA_REG_COUNT = 32,
+};
+
+#pragma pack(push, 1)
+
+struct cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum cper_error_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct cper_timestamp timestamp;
+ char platform_id[16];
+ guid_t partition_id; /* Reserved */
+ char creator_id[16];
+ guid_t notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_sec_desc {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ guid_t sec_type;
+ char fru_id[16];
+ enum cper_error_severity severity;
+ char fru_text[20];
+};
+
+struct cper_sec_nonstd_err_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct cper_sec_nonstd_err_info {
+ guid_t error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct cper_sec_nonstd_err_ctx {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint32_t reg_dump[CPER_ACA_REG_COUNT];
+};
+
+struct cper_sec_nonstd_err {
+ struct cper_sec_nonstd_err_hdr hdr;
+ struct cper_sec_nonstd_err_info info;
+ struct cper_sec_nonstd_err_ctx ctx;
+};
+
+struct cper_sec_crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct cper_sec_crashdump_reg_data {
+ uint32_t status_lo;
+ uint32_t status_hi;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t ipid_lo;
+ uint32_t ipid_hi;
+ uint32_t synd_lo;
+ uint32_t synd_hi;
+};
+
+struct cper_sec_crashdump_body_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct cper_sec_crashdump_reg_data data;
+};
+
+struct cper_sec_crashdump_body_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_MAX_OAM_COUNT];
+};
+
+struct cper_sec_crashdump_fatal {
+ struct cper_sec_crashdump_hdr hdr;
+ struct cper_sec_crashdump_body_fatal body;
+};
+
+struct cper_sec_crashdump_boot {
+ struct cper_sec_crashdump_hdr hdr;
+ struct cper_sec_crashdump_body_boot body;
+};
+
+#pragma pack(pop)
+
+#endif
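Editor's note on the new amd_cper.h above: the header only declares the on-wire CPER layout; the records themselves are assembled elsewhere in the driver. Because everything between #pragma pack(push, 1) and pop is byte-packed, a record's total size is simply the sum of its packed pieces, which is what record_length in struct cper_hdr has to carry. A minimal sizing sketch for a one-section fatal crashdump record under that assumption (the helpers are illustrative, not the driver's code):

#include <stdint.h>
#include <string.h>
#include "amd_cper.h"	/* the header added above */

static uint32_t cper_fatal_record_length(uint16_t sec_cnt)
{
	/* header, then one descriptor and one crashdump body per section */
	return sizeof(struct cper_hdr) +
	       sec_cnt * (uint32_t)sizeof(struct cper_sec_desc) +
	       sec_cnt * (uint32_t)sizeof(struct cper_sec_crashdump_fatal);
}

static void cper_hdr_init_sketch(struct cper_hdr *hdr, uint16_t sec_cnt)
{
	memcpy(hdr->signature, "CPER", 4);
	hdr->signature_end  = 0xFFFFFFFF;
	hdr->revision       = CPER_HDR_REV_1;
	hdr->error_severity = CPER_SEV_FATAL;
	hdr->sec_cnt        = sec_cnt;
	hdr->record_length  = cper_fatal_record_length(sec_cnt);
	/* timestamp, creator/notify GUIDs, descriptors and ACA register
	 * dumps are filled in by the real CPER code, not shown here */
}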
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index a1ece3eecdf5..a08611cb8041 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -49,6 +49,17 @@
| CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
+
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 0x00000001
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 0x00000002
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 0x00000004
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 0x00000008
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 0x00000010
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 0x00000020
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 0x00000040
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK 0x0000FFFF
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT 0
+
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
@@ -56,6 +67,7 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
+#define CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
/* 1/2/4/8/16 lanes */
@@ -65,4 +77,10 @@
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+#define AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
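Editor's note on the amd_pcie.h hunks above: the new CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_* flags sit in the low 16 bits, mirroring the existing platform-side flags in the high 16 bits, so a single 32-bit capability word can now carry both lane-width sets; the added MASK/SHIFT pairs make splitting it a plain mask-and-shift. A small sketch (the helper names are made up):

#include <stdint.h>
#include "amd_pcie.h"	/* the masks added above */

/* split the combined capability word into its two halves */
static uint16_t asic_width_flags(uint32_t caps)
{
	return (caps & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK) >>
	       CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT;
}

static uint16_t platform_width_flags(uint32_t caps)
{
	return (caps & CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK) >>
	       CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT;
}

/* e.g. x16 support on the ASIC side:
 *   asic_width_flags(caps) & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16
 */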
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 98d9e840b0e2..17945094a138 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VPE,
AMD_IP_BLOCK_TYPE_UMSCH_MM,
AMD_IP_BLOCK_TYPE_ISP,
+ AMD_IP_BLOCK_TYPE_RAS,
AMD_IP_BLOCK_TYPE_NUM,
};
@@ -239,18 +240,51 @@ enum amd_harvest_ip_mask {
AMD_HARVEST_IP_DMU_MASK = 0x4,
};
+/**
+ * enum DC_FEATURE_MASK - Bits that control DC feature defaults
+ */
enum DC_FEATURE_MASK {
//Default value can be found at "uint amdgpu_dc_feature_mask"
- DC_FBC_MASK = (1 << 0), //0x1, disabled by default
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
- DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
- DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
- DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
- DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
- DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
- DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
+ /**
+ * @DC_FBC_MASK: (0x1) disabled by default
+ */
+ DC_FBC_MASK = (1 << 0),
+ /**
+ * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default
+ */
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1),
+ /**
+ * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default
+ */
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2),
+ /**
+ * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1
+ */
+ DC_PSR_MASK = (1 << 3),
+ /**
+ * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default
+ */
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4),
+ /**
+ * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5),
+ /**
+ * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6),
+ /**
+ * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default
+ */
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7),
+ /**
+ * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default
+ */
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8),
+ /**
+ * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4
+ */
+ DC_REPLAY_MASK = (1 << 9),
};
/**
@@ -258,64 +292,64 @@ enum DC_FEATURE_MASK {
*/
enum DC_DEBUG_MASK {
/**
- * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting
*/
DC_DISABLE_PIPE_SPLIT = 0x1,
/**
- * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode
*/
DC_DISABLE_STUTTER = 0x2,
/**
- * @DC_DISABLE_DSC: If set, disable display stream compression
+ * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression
*/
DC_DISABLE_DSC = 0x4,
/**
- * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations
*/
DC_DISABLE_CLOCK_GATING = 0x8,
/**
- * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU
*/
DC_DISABLE_PSR = 0x10,
/**
- * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even
* if mclk switch in vblank is possible
*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
/**
- * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading
*/
DC_DISABLE_MPO = 0x40,
/**
- * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA
*/
DC_ENABLE_DPIA_TRACE = 0x80,
/**
- * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version
* does not default to it.
*/
DC_ENABLE_DML2 = 0x100,
/**
- * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU
*/
DC_DISABLE_PSR_SU = 0x200,
/**
- * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay
*/
DC_DISABLE_REPLAY = 0x400,
/**
- * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time.
* If more than one IPS debug bit is set, the lowest bit takes
* precedence. For example, if DC_FORCE_IPS_ENABLE and
* DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
@@ -324,26 +358,59 @@ enum DC_DEBUG_MASK {
DC_DISABLE_IPS = 0x800,
/**
- * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time,
* *except* when driver goes into suspend.
*/
DC_DISABLE_IPS_DYNAMIC = 0x1000,
/**
- * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if
* there is an enabled display. Otherwise, enable all IPS.
*/
DC_DISABLE_IPS2_DYNAMIC = 0x2000,
/**
- * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
/**
- * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
+
+ /**
+ * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver.
+ */
+ DC_DISABLE_HDMI_CEC = 0x10000,
+
+ /**
+ * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted
+ * Memory Clock Switching (FAMS) feature in amdgpu driver.
+ */
+ DC_DISABLE_SUBVP_FAMS = 0x20000,
+ /**
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom
+ * brightness curves
+ */
+ DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
+
+ /**
+ * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW
+ * path regardless of reported HW capabilities.
+ */
+ DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
+
+ /**
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW
+ * path failure, retry using legacy SW path.
+ */
+ DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
+
+ /**
+ * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training
+ */
+ DC_SKIP_DETECTION_LT = 0x200000,
};
enum amd_dpm_forced_level;
@@ -364,6 +431,7 @@ enum amd_dpm_forced_level;
* (such as allocating any required memory)
* @suspend: handles IP specific hw/sw changes for suspend
* @resume: handles IP specific hw/sw changes for resume
+ * @complete: handles IP specific changes after resume
* @is_idle: returns current IP block idle status
* @wait_for_idle: poll for idle
* @check_soft_reset: check soft reset the IP block
@@ -395,7 +463,8 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
- bool (*is_idle)(void *handle);
+ void (*complete)(struct amdgpu_ip_block *ip_block);
+ bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
@@ -405,7 +474,7 @@ struct amd_ip_funcs {
enum amd_clockgating_state state);
int (*set_powergating_state)(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
- void (*get_clockgating_state)(void *handle, u64 *flags);
+ void (*get_clockgating_state)(struct amdgpu_ip_block *ip_block, u64 *flags);
void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
};
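Editor's note on the amd_shared.h changes above: every DC_FEATURE_MASK and DC_DEBUG_MASK entry is a single bit in a user-supplied mask, and the reworked kernel-doc now spells out each bit's hex value, which makes composing a mask on the command line easier. Consumers just test bits; a sketch of the pattern, with a local variable standing in for the driver's debug-mask parameter (values copied from the kernel-doc above):

#include <stdbool.h>
#include <stdint.h>

#define DC_DISABLE_PSR			0x10
#define DC_HDCP_LC_FORCE_FW_ENABLE	0x80000

/* stand-in for the user-supplied debug mask */
static uint32_t dc_debug_mask = DC_DISABLE_PSR | DC_HDCP_LC_FORCE_FW_ENABLE;

static bool dc_debug_enabled(uint32_t bit)
{
	return (dc_debug_mask & bit) != 0;
}

/* e.g. passing 0x80010 sets both bits tested here */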
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index 9de01ae574c0..067eddd9c62d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4115,6 +4115,7 @@
#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
#define mmSCL0_SCL_CONTROL 0x1B44
#define mmSCL0_SCL_DEBUG 0x1B6A
#define mmSCL0_SCL_DEBUG2 0x1B69
@@ -4144,6 +4145,7 @@
#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
#define mmSCL1_SCL_CONTROL 0x1E44
#define mmSCL1_SCL_DEBUG 0x1E6A
#define mmSCL1_SCL_DEBUG2 0x1E69
@@ -4173,6 +4175,7 @@
#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_SCALER_ENABLE 0x4142
#define mmSCL2_SCL_CONTROL 0x4144
#define mmSCL2_SCL_DEBUG 0x416A
#define mmSCL2_SCL_DEBUG2 0x4169
@@ -4202,6 +4205,7 @@
#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_SCALER_ENABLE 0x4442
#define mmSCL3_SCL_CONTROL 0x4444
#define mmSCL3_SCL_DEBUG 0x446A
#define mmSCL3_SCL_DEBUG2 0x4469
@@ -4231,6 +4235,7 @@
#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_SCALER_ENABLE 0x4742
#define mmSCL4_SCL_CONTROL 0x4744
#define mmSCL4_SCL_DEBUG 0x476A
#define mmSCL4_SCL_DEBUG2 0x4769
@@ -4260,6 +4265,7 @@
#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
#define mmSCL5_SCL_CONTROL 0x4A44
#define mmSCL5_SCL_DEBUG 0x4A6A
#define mmSCL5_SCL_DEBUG2 0x4A69
@@ -4287,6 +4293,7 @@
#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL_COEF_RAM_SELECT 0x1B40
#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_SCALER_ENABLE 0x1B42
#define mmSCL_CONTROL 0x1B44
#define mmSCL_DEBUG 0x1B6A
#define mmSCL_DEBUG2 0x1B69
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index bd8085ec54ed..9317a7afa621 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -5242,6 +5242,8 @@
#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
@@ -8648,6 +8650,8 @@
#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h
new file mode 100644
index 000000000000..b070f9b91f17
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h
@@ -0,0 +1,15485 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+
+#ifndef _dcn_3_6_0_OFFSET_HEADER
+#define _dcn_3_6_0_OFFSET_HEADER
+
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x1300000
+#define regGLOBAL_CAPABILITIES 0x4b7000
+#define regGLOBAL_CAPABILITIES_BASE_IDX 3
+#define regMINOR_VERSION 0x4b7000
+#define regMINOR_VERSION_BASE_IDX 3
+#define regMAJOR_VERSION 0x4b7000
+#define regMAJOR_VERSION_BASE_IDX 3
+#define regOUTPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regOUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regINPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regGLOBAL_CONTROL 0x4b7002
+#define regGLOBAL_CONTROL_BASE_IDX 3
+#define regWAKE_ENABLE 0x4b7003
+#define regWAKE_ENABLE_BASE_IDX 3
+#define regSTATE_CHANGE_STATUS 0x4b7003
+#define regSTATE_CHANGE_STATUS_BASE_IDX 3
+#define regGLOBAL_STATUS 0x4b7004
+#define regGLOBAL_STATUS_BASE_IDX 3
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINTERRUPT_CONTROL 0x4b7008
+#define regINTERRUPT_CONTROL_BASE_IDX 3
+#define regINTERRUPT_STATUS 0x4b7009
+#define regINTERRUPT_STATUS_BASE_IDX 3
+#define regWALL_CLOCK_COUNTER 0x4b700c
+#define regWALL_CLOCK_COUNTER_BASE_IDX 3
+#define regSTREAM_SYNCHRONIZATION 0x4b700e
+#define regSTREAM_SYNCHRONIZATION_BASE_IDX 3
+#define regCORB_LOWER_BASE_ADDRESS 0x4b7010
+#define regCORB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regCORB_UPPER_BASE_ADDRESS 0x4b7011
+#define regCORB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_WRITE_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_READ_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_READ_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_CONTROL 0x4b7013
+#define regAZCONTROLLER0_CORB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_STATUS 0x4b7013
+#define regAZCONTROLLER0_CORB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_SIZE 0x4b7013
+#define regAZCONTROLLER0_CORB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS 0x4b7014
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS 0x4b7015
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER 0x4b7016
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT 0x4b7016
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_CONTROL 0x4b7017
+#define regAZCONTROLLER0_RIRB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_STATUS 0x4b7017
+#define regAZCONTROLLER0_RIRB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_SIZE 0x4b7017
+#define regAZCONTROLLER0_RIRB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x4b7019
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS 0x4b701a
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS 0x4b701c
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS 0x4b701d
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS 0x4b780c
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x1300000
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x1300000
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x0
+#define regAZCONTROLLER1_CORB_WRITE_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_READ_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_READ_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_CONTROL 0x0001
+#define regAZCONTROLLER1_CORB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_STATUS 0x0001
+#define regAZCONTROLLER1_CORB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_SIZE 0x0001
+#define regAZCONTROLLER1_CORB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS 0x0002
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS 0x0003
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER 0x0004
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT 0x0004
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_CONTROL 0x0005
+#define regAZCONTROLLER1_RIRB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_STATUS 0x0005
+#define regAZCONTROLLER1_RIRB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_SIZE 0x0005
+#define regAZCONTROLLER1_RIRB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x0007
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS 0x0008
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS 0x000a
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS 0x000b
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS 0x074c
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 1
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL 0x0042
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL 0x0043
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDP_DTO_DBUF_EN 0x0044
+#define regDP_DTO_DBUF_EN_BASE_IDX 1
+#define regDSCCLK3_DTO_PARAM 0x0045
+#define regDSCCLK3_DTO_PARAM_BASE_IDX 1
+#define regDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define regDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL4 0x0049
+#define regDCCG_GATE_DISABLE_CNTL4_BASE_IDX 1
+#define regDPSTREAMCLK_CNTL 0x004a
+#define regDPSTREAMCLK_CNTL_BASE_IDX 1
+#define regREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define regREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL 0x004c
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL2 0x004e
+#define regDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define regDCCG_GLOBAL_FGCG_REP_CNTL 0x0050
+#define regDCCG_GLOBAL_FGCG_REP_CNTL_BASE_IDX 1
+#define regDCCG_DS_DTO_INCR 0x0053
+#define regDCCG_DS_DTO_INCR_BASE_IDX 1
+#define regDCCG_DS_DTO_MODULO 0x0054
+#define regDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define regDCCG_DS_CNTL 0x0055
+#define regDCCG_DS_CNTL_BASE_IDX 1
+#define regDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define regDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define regDPREFCLK_CNTL 0x0058
+#define regDPREFCLK_CNTL_BASE_IDX 1
+#define regDCE_VERSION 0x005e
+#define regDCE_VERSION_BASE_IDX 1
+#define regDCCG_GTC_CNTL 0x0060
+#define regDCCG_GTC_CNTL_BASE_IDX 1
+#define regDCCG_GTC_DTO_INCR 0x0061
+#define regDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define regDCCG_GTC_DTO_MODULO 0x0062
+#define regDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define regDCCG_GTC_CURRENT 0x0063
+#define regDCCG_GTC_CURRENT_BASE_IDX 1
+#define regSYMCLK32_SE_CNTL 0x0065
+#define regSYMCLK32_SE_CNTL_BASE_IDX 1
+#define regSYMCLK32_LE_CNTL 0x0066
+#define regSYMCLK32_LE_CNTL_BASE_IDX 1
+#define regDTBCLK_P_CNTL 0x0068
+#define regDTBCLK_P_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL5 0x0069
+#define regDCCG_GATE_DISABLE_CNTL5_BASE_IDX 1
+#define regDSCCLK0_DTO_PARAM 0x006c
+#define regDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK1_DTO_PARAM 0x006d
+#define regDSCCLK1_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK2_DTO_PARAM 0x006e
+#define regDSCCLK2_DTO_PARAM_BASE_IDX 1
+#define regOTG_PIXEL_RATE_DIV 0x006f
+#define regOTG_PIXEL_RATE_DIV_BASE_IDX 1
+#define regMILLISECOND_TIME_BASE_DIV 0x0070
+#define regMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define regDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL 0x0073
+#define regDCCG_PERFMON_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL 0x0074
+#define regDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define regDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define regDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define regSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_CAC_STATUS 0x0077
+#define regDCCG_CAC_STATUS_BASE_IDX 1
+#define regMICROSECOND_TIME_BASE_DIV 0x007b
+#define regMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL2 0x007c
+#define regDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define regSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define regSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_DISP_CNTL_REG 0x007f
+#define regDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define regOTG0_PIXEL_RATE_CNTL 0x0080
+#define regOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO0_PHASE 0x0081
+#define regDP_DTO0_PHASE_BASE_IDX 1
+#define regDP_DTO0_MODULO 0x0082
+#define regDP_DTO0_MODULO_BASE_IDX 1
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG1_PIXEL_RATE_CNTL 0x0084
+#define regOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO1_PHASE 0x0085
+#define regDP_DTO1_PHASE_BASE_IDX 1
+#define regDP_DTO1_MODULO 0x0086
+#define regDP_DTO1_MODULO_BASE_IDX 1
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG2_PIXEL_RATE_CNTL 0x0088
+#define regOTG2_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO2_PHASE 0x0089
+#define regDP_DTO2_PHASE_BASE_IDX 1
+#define regDP_DTO2_MODULO 0x008a
+#define regDP_DTO2_MODULO_BASE_IDX 1
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL 0x008b
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG3_PIXEL_RATE_CNTL 0x008c
+#define regOTG3_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO3_PHASE 0x008d
+#define regDP_DTO3_PHASE_BASE_IDX 1
+#define regDP_DTO3_MODULO 0x008e
+#define regDP_DTO3_MODULO_BASE_IDX 1
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL 0x008f
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define regDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDPPCLK0_DTO_PARAM 0x0099
+#define regDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK1_DTO_PARAM 0x009a
+#define regDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK2_DTO_PARAM 0x009b
+#define regDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK3_DTO_PARAM 0x009c
+#define regDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define regDCCG_CAC_STATUS2 0x009f
+#define regDCCG_CAC_STATUS2_BASE_IDX 1
+#define regSYMCLKA_CLOCK_ENABLE 0x00a0
+#define regSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKB_CLOCK_ENABLE 0x00a1
+#define regSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKC_CLOCK_ENABLE 0x00a2
+#define regSYMCLKC_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKD_CLOCK_ENABLE 0x00a3
+#define regSYMCLKD_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKE_CLOCK_ENABLE 0x00a4
+#define regSYMCLKE_CLOCK_ENABLE_BASE_IDX 1
+#define regDCCG_SOFT_RESET 0x00a6
+#define regDCCG_SOFT_RESET_BASE_IDX 1
+#define regDSCCLK_DTO_CTRL 0x00a7
+#define regDSCCLK_DTO_CTRL_BASE_IDX 1
+#define regDPPCLK_CTRL 0x00a8
+#define regDPPCLK_CTRL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL6 0x00a9
+#define regDCCG_GATE_DISABLE_CNTL6_BASE_IDX 1
+#define regSYMCLK_PSP_CNTL 0x00aa
+#define regSYMCLK_PSP_CNTL_BASE_IDX 1
+#define regDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define regDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define regDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define regDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define regDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_MODULE 0x00af
+#define regDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define regDPPCLK_DTO_CTRL 0x00b6
+#define regDPPCLK_DTO_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_CTRL 0x00b8
+#define regDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define regDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+#define regFORCE_SYMCLK_DISABLE 0x00ba
+#define regFORCE_SYMCLK_DISABLE_BASE_IDX 1
+#define regDTBCLK_DTO0_PHASE 0x0018
+#define regDTBCLK_DTO0_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO1_PHASE 0x0019
+#define regDTBCLK_DTO1_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO2_PHASE 0x001a
+#define regDTBCLK_DTO2_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO3_PHASE 0x001b
+#define regDTBCLK_DTO3_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO0_MODULO 0x001f
+#define regDTBCLK_DTO0_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO1_MODULO 0x0020
+#define regDTBCLK_DTO1_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO2_MODULO 0x0021
+#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO3_MODULO 0x0022
+#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
+#define regPHYASYMCLK_CLOCK_CNTL 0x0052
+#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
+#define regPHYBSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYCSYMCLK_CLOCK_CNTL 0x0054
+#define regPHYCSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYDSYMCLK_CLOCK_CNTL 0x0055
+#define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYESYMCLK_CLOCK_CNTL 0x0056
+#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
+#define regDCCG_GATE_DISABLE_CNTL3 0x005a
+#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
+#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
+#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO_DBUF_EN 0x0063
+#define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+// base address: 0x0
+#define regDC_PERFMON0_PERFCOUNTER_CNTL 0x0000
+#define regDC_PERFMON0_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2 0x0001
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_STATE 0x0002
+#define regDC_PERFMON0_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL 0x0003
+#define regDC_PERFMON0_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL2 0x0004
+#define regDC_PERFMON0_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC 0x0005
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW 0x0006
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_HI 0x0007
+#define regDC_PERFMON0_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_LOW 0x0008
+#define regDC_PERFMON0_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+// base address: 0x30
+#define regDC_PERFMON1_PERFCOUNTER_CNTL 0x000c
+#define regDC_PERFMON1_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2 0x000d
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_STATE 0x000e
+#define regDC_PERFMON1_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL 0x000f
+#define regDC_PERFMON1_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL2 0x0010
+#define regDC_PERFMON1_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC 0x0011
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW 0x0012
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_HI 0x0013
+#define regDC_PERFMON1_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_LOW 0x0014
+#define regDC_PERFMON1_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_fgsec_dispdec
+// base address: 0x0
+#define regDMCUB_RBBMIF_SEC_CNTL 0x017a
+#define regDMCUB_RBBMIF_SEC_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define regRBBMIF_TIMEOUT 0x017f
+#define regRBBMIF_TIMEOUT_BASE_IDX 2
+#define regRBBMIF_STATUS 0x0180
+#define regRBBMIF_STATUS_BASE_IDX 2
+#define regRBBMIF_STATUS_2 0x0181
+#define regRBBMIF_STATUS_2_BASE_IDX 2
+#define regRBBMIF_INT_STATUS 0x0182
+#define regRBBMIF_INT_STATUS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS 0x0183
+#define regRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS_2 0x0184
+#define regRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define regRBBMIF_STATUS_FLAG 0x0185
+#define regRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2f8
+#define regDC_PERFMON2_PERFCOUNTER_CNTL 0x00be
+#define regDC_PERFMON2_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2 0x00bf
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_STATE 0x00c0
+#define regDC_PERFMON2_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL 0x00c1
+#define regDC_PERFMON2_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL2 0x00c2
+#define regDC_PERFMON2_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC 0x00c3
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW 0x00c4
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_HI 0x00c5
+#define regDC_PERFMON2_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_LOW 0x00c6
+#define regDC_PERFMON2_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+// base address: 0x0
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE 0x0126
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP 0x0127
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP_BASE_IDX 2
+#define regDC_GPU_TIMER_READ 0x0128
+#define regDC_GPU_TIMER_READ_BASE_IDX 2
+#define regDC_GPU_TIMER_READ_CNTL 0x0129
+#define regDC_GPU_TIMER_READ_CNTL_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS 0x012a
+#define regDISP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE 0x012b
+#define regDISP_INTERRUPT_STATUS_CONTINUE_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE2 0x012c
+#define regDISP_INTERRUPT_STATUS_CONTINUE2_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE3 0x012d
+#define regDISP_INTERRUPT_STATUS_CONTINUE3_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE4 0x012e
+#define regDISP_INTERRUPT_STATUS_CONTINUE4_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE5 0x012f
+#define regDISP_INTERRUPT_STATUS_CONTINUE5_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE6 0x0130
+#define regDISP_INTERRUPT_STATUS_CONTINUE6_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE7 0x0131
+#define regDISP_INTERRUPT_STATUS_CONTINUE7_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE8 0x0132
+#define regDISP_INTERRUPT_STATUS_CONTINUE8_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE9 0x0133
+#define regDISP_INTERRUPT_STATUS_CONTINUE9_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE10 0x0134
+#define regDISP_INTERRUPT_STATUS_CONTINUE10_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE11 0x0135
+#define regDISP_INTERRUPT_STATUS_CONTINUE11_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE12 0x0136
+#define regDISP_INTERRUPT_STATUS_CONTINUE12_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE13 0x0137
+#define regDISP_INTERRUPT_STATUS_CONTINUE13_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE14 0x0138
+#define regDISP_INTERRUPT_STATUS_CONTINUE14_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE15 0x0139
+#define regDISP_INTERRUPT_STATUS_CONTINUE15_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE16 0x013a
+#define regDISP_INTERRUPT_STATUS_CONTINUE16_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE17 0x013b
+#define regDISP_INTERRUPT_STATUS_CONTINUE17_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE18 0x013c
+#define regDISP_INTERRUPT_STATUS_CONTINUE18_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE19 0x013d
+#define regDISP_INTERRUPT_STATUS_CONTINUE19_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE20 0x013e
+#define regDISP_INTERRUPT_STATUS_CONTINUE20_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE21 0x013f
+#define regDISP_INTERRUPT_STATUS_CONTINUE21_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE22 0x0140
+#define regDISP_INTERRUPT_STATUS_CONTINUE22_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VREADY 0x0141
+#define regDC_GPU_TIMER_START_POSITION_VREADY_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP 0x0142
+#define regDC_GPU_TIMER_START_POSITION_FLIP_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK 0x0143
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY 0x0144
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE23 0x0145
+#define regDISP_INTERRUPT_STATUS_CONTINUE23_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE24 0x0146
+#define regDISP_INTERRUPT_STATUS_CONTINUE24_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE25 0x0147
+#define regDISP_INTERRUPT_STATUS_CONTINUE25_BASE_IDX 2
+#define regDCCG_INTERRUPT_DEST 0x0148
+#define regDCCG_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST 0x0149
+#define regDMU_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST2 0x014a
+#define regDMU_INTERRUPT_DEST2_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST 0x014b
+#define regDCPG_INTERRUPT_DEST_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST2 0x014c
+#define regDCPG_INTERRUPT_DEST2_BASE_IDX 2
+#define regMMHUBBUB_INTERRUPT_DEST 0x014d
+#define regMMHUBBUB_INTERRUPT_DEST_BASE_IDX 2
+#define regWB_INTERRUPT_DEST 0x014e
+#define regWB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST 0x014f
+#define regDCHUB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST 0x0150
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST2 0x0151
+#define regDCHUB_INTERRUPT_DEST2_BASE_IDX 2
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST 0x0152
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regMPC_INTERRUPT_DEST 0x0153
+#define regMPC_INTERRUPT_DEST_BASE_IDX 2
+#define regOPP_INTERRUPT_DEST 0x0154
+#define regOPP_INTERRUPT_DEST_BASE_IDX 2
+#define regOPTC_INTERRUPT_DEST 0x0155
+#define regOPTC_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG0_INTERRUPT_DEST 0x0156
+#define regOTG0_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG1_INTERRUPT_DEST 0x0157
+#define regOTG1_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG2_INTERRUPT_DEST 0x0158
+#define regOTG2_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG3_INTERRUPT_DEST 0x0159
+#define regOTG3_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG4_INTERRUPT_DEST 0x015a
+#define regOTG4_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG5_INTERRUPT_DEST 0x015b
+#define regOTG5_INTERRUPT_DEST_BASE_IDX 2
+#define regDIG_INTERRUPT_DEST 0x015c
+#define regDIG_INTERRUPT_DEST_BASE_IDX 2
+#define regI2C_DDC_HPD_INTERRUPT_DEST 0x015d
+#define regI2C_DDC_HPD_INTERRUPT_DEST_BASE_IDX 2
+#define regDIO_INTERRUPT_DEST 0x015f
+#define regDIO_INTERRUPT_DEST_BASE_IDX 2
+#define regDCIO_INTERRUPT_DEST 0x0160
+#define regDCIO_INTERRUPT_DEST_BASE_IDX 2
+#define regHPD_INTERRUPT_DEST 0x0161
+#define regHPD_INTERRUPT_DEST_BASE_IDX 2
+#define regAZ_INTERRUPT_DEST 0x0162
+#define regAZ_INTERRUPT_DEST_BASE_IDX 2
+#define regAUX_INTERRUPT_DEST 0x0163
+#define regAUX_INTERRUPT_DEST_BASE_IDX 2
+#define regDSC_INTERRUPT_DEST 0x0164
+#define regDSC_INTERRUPT_DEST_BASE_IDX 2
+#define regHPO_INTERRUPT_DEST 0x0165
+#define regHPO_INTERRUPT_DEST_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+// base address: 0x0
+#define regCC_DC_PIPE_DIS 0x00ca
+#define regCC_DC_PIPE_DIS_BASE_IDX 2
+#define regDMU_CLK_CNTL 0x00cb
+#define regDMU_CLK_CNTL_BASE_IDX 2
+#define regDMCUB_SMU_INTERRUPT_CNTL 0x00cd
+#define regDMCUB_SMU_INTERRUPT_CNTL_BASE_IDX 2
+#define regSMU_INTERRUPT_CONTROL 0x00ce
+#define regSMU_INTERRUPT_CONTROL_BASE_IDX 2
+#define regZSC_CNTL 0x00cf
+#define regZSC_CNTL_BASE_IDX 2
+#define regZSC_CNTL2 0x00d0
+#define regZSC_CNTL2_BASE_IDX 2
+#define regDMU_MISC_ALLOW_DS_FORCE 0x00d6
+#define regDMU_MISC_ALLOW_DS_FORCE_BASE_IDX 2
+#define regZSC_STATUS 0x00d7
+#define regZSC_STATUS_BASE_IDX 2
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG 0x00d8
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG 0x00d9
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regZPR_CLK_UNGATE_DELAY 0x00da
+#define regZPR_CLK_UNGATE_DELAY_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+// base address: 0x0
+#define regDOMAIN0_PG_CONFIG 0x0080
+#define regDOMAIN0_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN0_PG_STATUS 0x0081
+#define regDOMAIN0_PG_STATUS_BASE_IDX 2
+#define regDOMAIN1_PG_CONFIG 0x0082
+#define regDOMAIN1_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN1_PG_STATUS 0x0083
+#define regDOMAIN1_PG_STATUS_BASE_IDX 2
+#define regDOMAIN2_PG_CONFIG 0x0084
+#define regDOMAIN2_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN2_PG_STATUS 0x0085
+#define regDOMAIN2_PG_STATUS_BASE_IDX 2
+#define regDOMAIN3_PG_CONFIG 0x0086
+#define regDOMAIN3_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN3_PG_STATUS 0x0087
+#define regDOMAIN3_PG_STATUS_BASE_IDX 2
+#define regDOMAIN16_PG_CONFIG 0x0089
+#define regDOMAIN16_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN16_PG_STATUS 0x008a
+#define regDOMAIN16_PG_STATUS_BASE_IDX 2
+#define regDOMAIN17_PG_CONFIG 0x008b
+#define regDOMAIN17_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN17_PG_STATUS 0x008c
+#define regDOMAIN17_PG_STATUS_BASE_IDX 2
+#define regDOMAIN18_PG_CONFIG 0x008d
+#define regDOMAIN18_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN18_PG_STATUS 0x008e
+#define regDOMAIN18_PG_STATUS_BASE_IDX 2
+#define regDOMAIN19_PG_CONFIG 0x008f
+#define regDOMAIN19_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN19_PG_STATUS 0x0090
+#define regDOMAIN19_PG_STATUS_BASE_IDX 2
+#define regDOMAIN22_PG_CONFIG 0x0092
+#define regDOMAIN22_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN22_PG_STATUS 0x0093
+#define regDOMAIN22_PG_STATUS_BASE_IDX 2
+#define regDOMAIN23_PG_CONFIG 0x0094
+#define regDOMAIN23_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN23_PG_STATUS 0x0095
+#define regDOMAIN23_PG_STATUS_BASE_IDX 2
+#define regDOMAIN24_PG_CONFIG 0x0096
+#define regDOMAIN24_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN24_PG_STATUS 0x0097
+#define regDOMAIN24_PG_STATUS_BASE_IDX 2
+#define regDOMAIN25_PG_CONFIG 0x0098
+#define regDOMAIN25_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN25_PG_STATUS 0x0099
+#define regDOMAIN25_PG_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS 0x009a
+#define regDCPG_INTERRUPT_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_2 0x009b
+#define regDCPG_INTERRUPT_STATUS_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_3 0x009c
+#define regDCPG_INTERRUPT_STATUS_3_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_1 0x009d
+#define regDCPG_INTERRUPT_CONTROL_1_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_2 0x009e
+#define regDCPG_INTERRUPT_CONTROL_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_3 0x009f
+#define regDCPG_INTERRUPT_CONTROL_3_BASE_IDX 2
+#define regDC_IP_REQUEST_CNTL 0x00a0
+#define regDC_IP_REQUEST_CNTL_BASE_IDX 2
+#define regLONO_MEM_PWR_REQ_CNTL 0x00a4
+#define regLONO_MEM_PWR_REQ_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+// base address: 0x0
+#define regDMCUB_REGION0_OFFSET 0x018e
+#define regDMCUB_REGION0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION0_OFFSET_HIGH 0x018f
+#define regDMCUB_REGION0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET 0x0190
+#define regDMCUB_REGION1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET_HIGH 0x0191
+#define regDMCUB_REGION1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET 0x0192
+#define regDMCUB_REGION2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET_HIGH 0x0193
+#define regDMCUB_REGION2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET 0x0196
+#define regDMCUB_REGION4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET_HIGH 0x0197
+#define regDMCUB_REGION4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET 0x0198
+#define regDMCUB_REGION5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET_HIGH 0x0199
+#define regDMCUB_REGION5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET 0x019a
+#define regDMCUB_REGION6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET_HIGH 0x019b
+#define regDMCUB_REGION6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET 0x019c
+#define regDMCUB_REGION7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET_HIGH 0x019d
+#define regDMCUB_REGION7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION0_TOP_ADDRESS 0x019e
+#define regDMCUB_REGION0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION1_TOP_ADDRESS 0x019f
+#define regDMCUB_REGION1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION2_TOP_ADDRESS 0x01a0
+#define regDMCUB_REGION2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION4_TOP_ADDRESS 0x01a1
+#define regDMCUB_REGION4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION5_TOP_ADDRESS 0x01a2
+#define regDMCUB_REGION5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION6_TOP_ADDRESS 0x01a3
+#define regDMCUB_REGION6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION7_TOP_ADDRESS 0x01a4
+#define regDMCUB_REGION7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS 0x01a5
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS 0x01a6
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS 0x01a7
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS 0x01a8
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS 0x01a9
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS 0x01aa
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS 0x01ab
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS 0x01ac
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS 0x01ad
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS 0x01ae
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS 0x01af
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS 0x01b0
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS 0x01b1
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS 0x01b2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS 0x01b3
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS 0x01b4
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET 0x01b5
+#define regDMCUB_REGION3_CW0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH 0x01b6
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET 0x01b7
+#define regDMCUB_REGION3_CW1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH 0x01b8
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET 0x01b9
+#define regDMCUB_REGION3_CW2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH 0x01ba
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET 0x01bb
+#define regDMCUB_REGION3_CW3_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH 0x01bc
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET 0x01bd
+#define regDMCUB_REGION3_CW4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH 0x01be
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET 0x01bf
+#define regDMCUB_REGION3_CW5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH 0x01c0
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET 0x01c1
+#define regDMCUB_REGION3_CW6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH 0x01c2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET 0x01c3
+#define regDMCUB_REGION3_CW7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH 0x01c4
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ENABLE 0x01c5
+#define regDMCUB_INTERRUPT_ENABLE_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ACK 0x01c6
+#define regDMCUB_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INTERRUPT_STATUS 0x01c7
+#define regDMCUB_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_INTERRUPT_TYPE 0x01c8
+#define regDMCUB_INTERRUPT_TYPE_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_STATUS 0x01c9
+#define regDMCUB_EXT_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_CTXID 0x01ca
+#define regDMCUB_EXT_INTERRUPT_CTXID_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_ACK 0x01cb
+#define regDMCUB_EXT_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INST_FETCH_FAULT_ADDR 0x01cc
+#define regDMCUB_INST_FETCH_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_DATA_WRITE_FAULT_ADDR 0x01cd
+#define regDMCUB_DATA_WRITE_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_SEC_CNTL 0x01ce
+#define regDMCUB_SEC_CNTL_BASE_IDX 2
+#define regDMCUB_MEM_CNTL 0x01cf
+#define regDMCUB_MEM_CNTL_BASE_IDX 2
+#define regDMCUB_INBOX0_BASE_ADDRESS 0x01d0
+#define regDMCUB_INBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX0_SIZE 0x01d1
+#define regDMCUB_INBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX0_WPTR 0x01d2
+#define regDMCUB_INBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX0_RPTR 0x01d3
+#define regDMCUB_INBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_BASE_ADDRESS 0x01d4
+#define regDMCUB_INBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX1_SIZE 0x01d5
+#define regDMCUB_INBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX1_WPTR 0x01d6
+#define regDMCUB_INBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_RPTR 0x01d7
+#define regDMCUB_INBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_BASE_ADDRESS 0x01d8
+#define regDMCUB_OUTBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX0_SIZE 0x01d9
+#define regDMCUB_OUTBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX0_WPTR 0x01da
+#define regDMCUB_OUTBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_RPTR 0x01db
+#define regDMCUB_OUTBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_BASE_ADDRESS 0x01dc
+#define regDMCUB_OUTBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX1_SIZE 0x01dd
+#define regDMCUB_OUTBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX1_WPTR 0x01de
+#define regDMCUB_OUTBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_RPTR 0x01df
+#define regDMCUB_OUTBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER0 0x01e0
+#define regDMCUB_TIMER_TRIGGER0_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER1 0x01e1
+#define regDMCUB_TIMER_TRIGGER1_BASE_IDX 2
+#define regDMCUB_TIMER_WINDOW 0x01e2
+#define regDMCUB_TIMER_WINDOW_BASE_IDX 2
+#define regDMCUB_SCRATCH0 0x01e3
+#define regDMCUB_SCRATCH0_BASE_IDX 2
+#define regDMCUB_SCRATCH1 0x01e4
+#define regDMCUB_SCRATCH1_BASE_IDX 2
+#define regDMCUB_SCRATCH2 0x01e5
+#define regDMCUB_SCRATCH2_BASE_IDX 2
+#define regDMCUB_SCRATCH3 0x01e6
+#define regDMCUB_SCRATCH3_BASE_IDX 2
+#define regDMCUB_SCRATCH4 0x01e7
+#define regDMCUB_SCRATCH4_BASE_IDX 2
+#define regDMCUB_SCRATCH5 0x01e8
+#define regDMCUB_SCRATCH5_BASE_IDX 2
+#define regDMCUB_SCRATCH6 0x01e9
+#define regDMCUB_SCRATCH6_BASE_IDX 2
+#define regDMCUB_SCRATCH7 0x01ea
+#define regDMCUB_SCRATCH7_BASE_IDX 2
+#define regDMCUB_SCRATCH8 0x01eb
+#define regDMCUB_SCRATCH8_BASE_IDX 2
+#define regDMCUB_SCRATCH9 0x01ec
+#define regDMCUB_SCRATCH9_BASE_IDX 2
+#define regDMCUB_SCRATCH10 0x01ed
+#define regDMCUB_SCRATCH10_BASE_IDX 2
+#define regDMCUB_SCRATCH11 0x01ee
+#define regDMCUB_SCRATCH11_BASE_IDX 2
+#define regDMCUB_SCRATCH12 0x01ef
+#define regDMCUB_SCRATCH12_BASE_IDX 2
+#define regDMCUB_SCRATCH13 0x01f0
+#define regDMCUB_SCRATCH13_BASE_IDX 2
+#define regDMCUB_SCRATCH14 0x01f1
+#define regDMCUB_SCRATCH14_BASE_IDX 2
+#define regDMCUB_SCRATCH15 0x01f2
+#define regDMCUB_SCRATCH15_BASE_IDX 2
+#define regDMCUB_SCRATCH16 0x01f3
+#define regDMCUB_SCRATCH16_BASE_IDX 2
+#define regDMCUB_SCRATCH17 0x01f4
+#define regDMCUB_SCRATCH17_BASE_IDX 2
+#define regDMCUB_SCRATCH18 0x01f5
+#define regDMCUB_SCRATCH18_BASE_IDX 2
+#define regDMCUB_CNTL 0x01f6
+#define regDMCUB_CNTL_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN0 0x01f7
+#define regDMCUB_GPINT_DATAIN0_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN1 0x01f8
+#define regDMCUB_GPINT_DATAIN1_BASE_IDX 2
+#define regDMCUB_GPINT_DATAOUT 0x01f9
+#define regDMCUB_GPINT_DATAOUT_BASE_IDX 2
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR 0x01fa
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_LS_WAKE_INT_ENABLE 0x01fb
+#define regDMCUB_LS_WAKE_INT_ENABLE_BASE_IDX 2
+#define regDMCUB_MEM_PWR_CNTL 0x01fc
+#define regDMCUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regDMCUB_TIMER_CURRENT 0x01fd
+#define regDMCUB_TIMER_CURRENT_BASE_IDX 2
+#define regDMCUB_PROC_ID 0x01ff
+#define regDMCUB_PROC_ID_BASE_IDX 2
+#define regDMCUB_CNTL2 0x0200
+#define regDMCUB_CNTL2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN2 0x0215
+#define regDMCUB_GPINT_DATAIN2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN3 0x0216
+#define regDMCUB_GPINT_DATAIN3_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN4 0x0217
+#define regDMCUB_GPINT_DATAIN4_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN5 0x0218
+#define regDMCUB_GPINT_DATAIN5_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN6 0x0219
+#define regDMCUB_GPINT_DATAIN6_BASE_IDX 2
+#define regDMCUB_REGION3_TMR_AXI_SPACE 0x021a
+#define regDMCUB_REGION3_TMR_AXI_SPACE_BASE_IDX 2
+#define regDMCUB_SCRATCH19 0x022e
+#define regDMCUB_SCRATCH19_BASE_IDX 2
+#define regDMCUB_SCRATCH20 0x022f
+#define regDMCUB_SCRATCH20_BASE_IDX 2
+#define regDMCUB_SCRATCH21 0x0230
+#define regDMCUB_SCRATCH21_BASE_IDX 2
+#define regDMCUB_SCRATCH22 0x0231
+#define regDMCUB_SCRATCH22_BASE_IDX 2
+#define regDMCUB_SCRATCH23 0x0232
+#define regDMCUB_SCRATCH23_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwb_top_dispdec
+// base address: 0x0
+#define regDWB_ENABLE_CLK_CTRL 0x3228
+#define regDWB_ENABLE_CLK_CTRL_BASE_IDX 2
+#define regDWB_MEM_PWR_CTRL 0x3229
+#define regDWB_MEM_PWR_CTRL_BASE_IDX 2
+#define regFC_MODE_CTRL 0x322a
+#define regFC_MODE_CTRL_BASE_IDX 2
+#define regFC_FLOW_CTRL 0x322b
+#define regFC_FLOW_CTRL_BASE_IDX 2
+#define regFC_WINDOW_START 0x322c
+#define regFC_WINDOW_START_BASE_IDX 2
+#define regFC_WINDOW_SIZE 0x322d
+#define regFC_WINDOW_SIZE_BASE_IDX 2
+#define regFC_SOURCE_SIZE 0x322e
+#define regFC_SOURCE_SIZE_BASE_IDX 2
+#define regDWB_UPDATE_CTRL 0x322f
+#define regDWB_UPDATE_CTRL_BASE_IDX 2
+#define regDWB_CRC_CTRL 0x3230
+#define regDWB_CRC_CTRL_BASE_IDX 2
+#define regDWB_CRC_MASK_R_G 0x3231
+#define regDWB_CRC_MASK_R_G_BASE_IDX 2
+#define regDWB_CRC_MASK_B_A 0x3232
+#define regDWB_CRC_MASK_B_A_BASE_IDX 2
+#define regDWB_CRC_VAL_R_G 0x3233
+#define regDWB_CRC_VAL_R_G_BASE_IDX 2
+#define regDWB_CRC_VAL_B_A 0x3234
+#define regDWB_CRC_VAL_B_A_BASE_IDX 2
+#define regDWB_OUT_CTRL 0x3235
+#define regDWB_OUT_CTRL_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN 0x3236
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT 0x3237
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_BASE_IDX 2
+#define regDWB_HOST_READ_CONTROL 0x3238
+#define regDWB_HOST_READ_CONTROL_BASE_IDX 2
+#define regDWB_OVERFLOW_STATUS 0x3239
+#define regDWB_OVERFLOW_STATUS_BASE_IDX 2
+#define regDWB_OVERFLOW_COUNTER 0x323a
+#define regDWB_OVERFLOW_COUNTER_BASE_IDX 2
+#define regDWB_SOFT_RESET 0x323b
+#define regDWB_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwbcp_dispdec
+// base address: 0x0
+#define regDWB_HDR_MULT_COEF 0x3294
+#define regDWB_HDR_MULT_COEF_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_MODE 0x3295
+#define regDWB_GAMUT_REMAP_MODE_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_COEF_FORMAT 0x3296
+#define regDWB_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C11_C12 0x3297
+#define regDWB_GAMUT_REMAPA_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C13_C14 0x3298
+#define regDWB_GAMUT_REMAPA_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C21_C22 0x3299
+#define regDWB_GAMUT_REMAPA_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C23_C24 0x329a
+#define regDWB_GAMUT_REMAPA_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C31_C32 0x329b
+#define regDWB_GAMUT_REMAPA_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C33_C34 0x329c
+#define regDWB_GAMUT_REMAPA_C33_C34_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C11_C12 0x329d
+#define regDWB_GAMUT_REMAPB_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C13_C14 0x329e
+#define regDWB_GAMUT_REMAPB_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C21_C22 0x329f
+#define regDWB_GAMUT_REMAPB_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C23_C24 0x32a0
+#define regDWB_GAMUT_REMAPB_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C31_C32 0x32a1
+#define regDWB_GAMUT_REMAPB_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C33_C34 0x32a2
+#define regDWB_GAMUT_REMAPB_C33_C34_BASE_IDX 2
+#define regDWB_OGAM_CONTROL 0x32a3
+#define regDWB_OGAM_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_LUT_INDEX 0x32a4
+#define regDWB_OGAM_LUT_INDEX_BASE_IDX 2
+#define regDWB_OGAM_LUT_DATA 0x32a5
+#define regDWB_OGAM_LUT_DATA_BASE_IDX 2
+#define regDWB_OGAM_LUT_CONTROL 0x32a6
+#define regDWB_OGAM_LUT_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_B 0x32a7
+#define regDWB_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_G 0x32a8
+#define regDWB_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_R 0x32a9
+#define regDWB_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B 0x32aa
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B 0x32ab
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G 0x32ac
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G 0x32ad
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R 0x32ae
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R 0x32af
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_B 0x32b0
+#define regDWB_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_B 0x32b1
+#define regDWB_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_G 0x32b2
+#define regDWB_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_G 0x32b3
+#define regDWB_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_R 0x32b4
+#define regDWB_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_R 0x32b5
+#define regDWB_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_B 0x32b6
+#define regDWB_OGAM_RAMA_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_G 0x32b7
+#define regDWB_OGAM_RAMA_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_R 0x32b8
+#define regDWB_OGAM_RAMA_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_0_1 0x32b9
+#define regDWB_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_2_3 0x32ba
+#define regDWB_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_4_5 0x32bb
+#define regDWB_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_6_7 0x32bc
+#define regDWB_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_8_9 0x32bd
+#define regDWB_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_10_11 0x32be
+#define regDWB_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_12_13 0x32bf
+#define regDWB_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_14_15 0x32c0
+#define regDWB_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_16_17 0x32c1
+#define regDWB_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_18_19 0x32c2
+#define regDWB_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_20_21 0x32c3
+#define regDWB_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_22_23 0x32c4
+#define regDWB_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_24_25 0x32c5
+#define regDWB_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_26_27 0x32c6
+#define regDWB_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_28_29 0x32c7
+#define regDWB_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_30_31 0x32c8
+#define regDWB_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_32_33 0x32c9
+#define regDWB_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_B 0x32ca
+#define regDWB_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_G 0x32cb
+#define regDWB_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_R 0x32cc
+#define regDWB_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B 0x32cd
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B 0x32ce
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G 0x32cf
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G 0x32d0
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R 0x32d1
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R 0x32d2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_B 0x32d3
+#define regDWB_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_B 0x32d4
+#define regDWB_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_G 0x32d5
+#define regDWB_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_G 0x32d6
+#define regDWB_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_R 0x32d7
+#define regDWB_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_R 0x32d8
+#define regDWB_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_B 0x32d9
+#define regDWB_OGAM_RAMB_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_G 0x32da
+#define regDWB_OGAM_RAMB_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_R 0x32db
+#define regDWB_OGAM_RAMB_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_0_1 0x32dc
+#define regDWB_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_2_3 0x32dd
+#define regDWB_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_4_5 0x32de
+#define regDWB_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_6_7 0x32df
+#define regDWB_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_8_9 0x32e0
+#define regDWB_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_10_11 0x32e1
+#define regDWB_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_12_13 0x32e2
+#define regDWB_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_14_15 0x32e3
+#define regDWB_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_16_17 0x32e4
+#define regDWB_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_18_19 0x32e5
+#define regDWB_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_20_21 0x32e6
+#define regDWB_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_22_23 0x32e7
+#define regDWB_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_24_25 0x32e8
+#define regDWB_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_26_27 0x32e9
+#define regDWB_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_28_29 0x32ea
+#define regDWB_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_30_31 0x32eb
+#define regDWB_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_32_33 0x32ec
+#define regDWB_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+// base address: 0xca20
+#define regDC_PERFMON3_PERFCOUNTER_CNTL 0x3288
+#define regDC_PERFMON3_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2 0x3289
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_STATE 0x328a
+#define regDC_PERFMON3_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL 0x328b
+#define regDC_PERFMON3_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL2 0x328c
+#define regDC_PERFMON3_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC 0x328d
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW 0x328e
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_HI 0x328f
+#define regDC_PERFMON3_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_LOW 0x3290
+#define regDC_PERFMON3_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+// base address: 0x0
+#define regMCIF_WB_BUFMGR_SW_CONTROL 0x0272
+#define regMCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_STATUS 0x0274
+#define regMCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_PITCH 0x0275
+#define regMCIF_WB_BUF_PITCH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS 0x0276
+#define regMCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS2 0x0277
+#define regMCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS 0x0278
+#define regMCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS2 0x0279
+#define regMCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS 0x027a
+#define regMCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS2 0x027b
+#define regMCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS 0x027c
+#define regMCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS2 0x027d
+#define regMCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define regMCIF_WB_ARBITRATION_CONTROL 0x027e
+#define regMCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SCLK_CHANGE 0x027f
+#define regMCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y 0x0282
+#define regMCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C 0x0284
+#define regMCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y 0x0286
+#define regMCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C 0x0288
+#define regMCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y 0x028a
+#define regMCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C 0x028c
+#define regMCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y 0x028e
+#define regMCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C 0x0290
+#define regMCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_VCE_CONTROL 0x0292
+#define regMCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_NB_PSTATE_CONTROL 0x0293
+#define regMCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_CLOCK_GATER_CONTROL 0x0294
+#define regMCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SELF_REFRESH_CONTROL 0x0296
+#define regMCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define regMULTI_LEVEL_QOS_CTRL 0x0297
+#define regMULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define regMCIF_WB_SECURITY_LEVEL 0x0298
+#define regMCIF_WB_SECURITY_LEVEL_BASE_IDX 2
+#define regMCIF_WB_BUF_LUMA_SIZE 0x0299
+#define regMCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_CHROMA_SIZE 0x029a
+#define regMCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH 0x029b
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH 0x029c
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH 0x029d
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH 0x029e
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH 0x029f
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH 0x02a0
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH 0x02a1
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH 0x02a2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_RESOLUTION 0x02a3
+#define regMCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_2_RESOLUTION 0x02a4
+#define regMCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_3_RESOLUTION 0x02a5
+#define regMCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_4_RESOLUTION 0x02a6
+#define regMCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI 0x02a7
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI_BASE_IDX 2
+#define regMCIF_WB_VMID_CONTROL 0x02a8
+#define regMCIF_WB_VMID_CONTROL_BASE_IDX 2
+#define regMCIF_WB_MIN_TTO 0x02a9
+#define regMCIF_WB_MIN_TTO_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0xd48
+#define regDC_PERFMON4_PERFCOUNTER_CNTL 0x0352
+#define regDC_PERFMON4_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2 0x0353
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_STATE 0x0354
+#define regDC_PERFMON4_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL 0x0355
+#define regDC_PERFMON4_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL2 0x0356
+#define regDC_PERFMON4_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC 0x0357
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW 0x0358
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_HI 0x0359
+#define regDC_PERFMON4_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_LOW 0x035a
+#define regDC_PERFMON4_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+// base address: 0x0
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x02aa
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define regMCIF_WB_WATERMARK 0x02ab
+#define regMCIF_WB_WATERMARK_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONFIG 0x02ac
+#define regMMHUBBUB_WARMUP_CONFIG_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS 0x02ad
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW 0x02ae
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH 0x02af
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_ADDR_REGION 0x02b0
+#define regMMHUBBUB_WARMUP_ADDR_REGION_BASE_IDX 2
+#define regMMHUBBUB_MIN_TTO 0x02b1
+#define regMMHUBBUB_MIN_TTO_BASE_IDX 2
+#define regMMHUBBUB_CTRL 0x0333
+#define regMMHUBBUB_CTRL_BASE_IDX 2
+#define regWBIF_SMU_WM_CONTROL 0x0334
+#define regWBIF_SMU_WM_CONTROL_BASE_IDX 2
+#define regWBIF0_MISC_CTRL 0x0335
+#define regWBIF0_MISC_CTRL_BASE_IDX 2
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER 0x0336
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER 0x0337
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_STATUS 0x033e
+#define regMMHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_CNTL 0x033f
+#define regMMHUBBUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regMMHUBBUB_CLOCK_CNTL 0x0340
+#define regMMHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regMMHUBBUB_SOFT_RESET 0x0341
+#define regMMHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDMU_IF_ERR_STATUS 0x0345
+#define regDMU_IF_ERR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_CLIENT_UNIT_ID 0x0346
+#define regMMHUBBUB_CLIENT_UNIT_ID_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_VMID_CONTROL 0x0348
+#define regMMHUBBUB_WARMUP_VMID_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define regAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define regAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO 0x03c3
+#define regAZALIA_AUDIO_DTO_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define regAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define regAZALIA_SOCCLK_CONTROL 0x03c5
+#define regAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define regAZALIA_DATA_DMA_CONTROL 0x03c7
+#define regAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_BDL_DMA_CONTROL 0x03c8
+#define regAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define regAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define regAZALIA_CORB_DMA_CONTROL 0x03ca
+#define regAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define regAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define regAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define regAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define regAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define regAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define regAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define regAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define regAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define regAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define regAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define regAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL0 0x03e3
+#define regAZALIA_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL1 0x03e4
+#define regAZALIA_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL2 0x03e5
+#define regAZALIA_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL3 0x03e6
+#define regAZALIA_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC0_RESULT 0x03e7
+#define regAZALIA_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL0 0x03e8
+#define regAZALIA_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL1 0x03e9
+#define regAZALIA_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL2 0x03ea
+#define regAZALIA_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL3 0x03eb
+#define regAZALIA_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC1_RESULT 0x03ec
+#define regAZALIA_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_MEM_PWR_CTRL 0x03ee
+#define regAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define regAZALIA_MEM_PWR_STATUS 0x03ef
+#define regAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x0412
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x0413
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET0 0x0415
+#define regAZALIA_F0_GTC_GROUP_OFFSET0_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET1 0x0416
+#define regAZALIA_F0_GTC_GROUP_OFFSET1_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET2 0x0417
+#define regAZALIA_F0_GTC_GROUP_OFFSET2_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET3 0x0418
+#define regAZALIA_F0_GTC_GROUP_OFFSET3_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET4 0x0419
+#define regAZALIA_F0_GTC_GROUP_OFFSET4_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET5 0x041a
+#define regAZALIA_F0_GTC_GROUP_OFFSET5_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET6 0x041b
+#define regAZALIA_F0_GTC_GROUP_OFFSET6_BASE_IDX 2
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define regAZ_CLOCK_CNTL 0x0372
+#define regAZ_CLOCK_CNTL_BASE_IDX 2
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL 0x0373
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+// base address: 0xde8
+#define regDC_PERFMON5_PERFCOUNTER_CNTL 0x037a
+#define regDC_PERFMON5_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2 0x037b
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_STATE 0x037c
+#define regDC_PERFMON5_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL 0x037d
+#define regDC_PERFMON5_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL2 0x037e
+#define regDC_PERFMON5_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC 0x037f
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW 0x0380
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_HI 0x0381
+#define regDC_PERFMON5_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_LOW 0x0382
+#define regDC_PERFMON5_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+// base address: 0x0
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX 0x035e
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM0_AZALIA_STREAM_DATA 0x035f
+#define regAZF0STREAM0_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+// base address: 0x8
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX 0x0360
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM1_AZALIA_STREAM_DATA 0x0361
+#define regAZF0STREAM1_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+// base address: 0x10
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX 0x0362
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM2_AZALIA_STREAM_DATA 0x0363
+#define regAZF0STREAM2_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+// base address: 0x18
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX 0x0364
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM3_AZALIA_STREAM_DATA 0x0365
+#define regAZF0STREAM3_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+// base address: 0x20
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX 0x0366
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM4_AZALIA_STREAM_DATA 0x0367
+#define regAZF0STREAM4_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+// base address: 0x28
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX 0x0368
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM5_AZALIA_STREAM_DATA 0x0369
+#define regAZF0STREAM5_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+// base address: 0x30
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX 0x036a
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM6_AZALIA_STREAM_DATA 0x036b
+#define regAZF0STREAM6_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+// base address: 0x38
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX 0x036c
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM7_AZALIA_STREAM_DATA 0x036d
+#define regAZF0STREAM7_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+// base address: 0x320
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX 0x0426
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM8_AZALIA_STREAM_DATA 0x0427
+#define regAZF0STREAM8_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+// base address: 0x328
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX 0x0428
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM9_AZALIA_STREAM_DATA 0x0429
+#define regAZF0STREAM9_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+// base address: 0x330
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX 0x042a
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM10_AZALIA_STREAM_DATA 0x042b
+#define regAZF0STREAM10_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+// base address: 0x338
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX 0x042c
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM11_AZALIA_STREAM_DATA 0x042d
+#define regAZF0STREAM11_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+// base address: 0x340
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX 0x042e
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM12_AZALIA_STREAM_DATA 0x042f
+#define regAZF0STREAM12_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+// base address: 0x348
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX 0x0430
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM13_AZALIA_STREAM_DATA 0x0431
+#define regAZF0STREAM13_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+// base address: 0x350
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX 0x0432
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM14_AZALIA_STREAM_DATA 0x0433
+#define regAZF0STREAM14_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+// base address: 0x358
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX 0x0434
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM15_AZALIA_STREAM_DATA 0x0435
+#define regAZF0STREAM15_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+// base address: 0x30
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0392
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0393
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+// base address: 0x48
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0398
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0399
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+// base address: 0x60
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x039e
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x039f
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+// base address: 0x78
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03a4
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03a5
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+// base address: 0x90
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03aa
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03ab
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+// base address: 0xa8
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03b0
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03b1
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+// base address: 0x20
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0442
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0443
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+// base address: 0x30
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0446
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0447
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+// base address: 0x40
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044a
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044b
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+// base address: 0x50
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044e
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044f
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+// base address: 0x60
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0452
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0453
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+// base address: 0x70
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0456
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0457
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_dispdec
+// base address: 0x0
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x04f9
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define regDCHUBBUB_ARB_SAT_LEVEL 0x04fa
+#define regDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define regDCHUBBUB_ARB_QOS_FORCE 0x04fb
+#define regDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL 0x04fc
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL 0x04fd
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x04fe
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A 0x04ff
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A 0x0500
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A 0x0501
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A 0x0502
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A 0x0503
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A 0x0504
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A 0x0505
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A 0x0506
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A 0x0507
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A 0x0508
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x0509
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B 0x050a
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B 0x050b
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B 0x050c
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B 0x050d
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B 0x050e
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B 0x050f
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B 0x0510
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B 0x0511
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B 0x0512
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B 0x0513
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0514
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C 0x0515
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C 0x0516
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C 0x0517
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C 0x0518
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C 0x0519
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C 0x051a
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C 0x051b
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C 0x051c
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C 0x051d
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C 0x051e
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x051f
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D 0x0520
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D 0x0521
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D 0x0522
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D 0x0523
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D 0x0524
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D 0x0525
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D 0x0526
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D 0x0527
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D 0x0528
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D 0x0529
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_HOSTVM_CNTL 0x052a
+#define regDCHUBBUB_ARB_HOSTVM_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x052b
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_MALL_CNTL 0x052c
+#define regDCHUBBUB_ARB_MALL_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE 0x052d
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL 0x052e
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_LSB 0x052f
+#define regSURFACE_CHECK0_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_MSB 0x0530
+#define regSURFACE_CHECK0_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_LSB 0x0531
+#define regSURFACE_CHECK1_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_MSB 0x0532
+#define regSURFACE_CHECK1_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_LSB 0x0533
+#define regSURFACE_CHECK2_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_MSB 0x0534
+#define regSURFACE_CHECK2_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_LSB 0x0535
+#define regSURFACE_CHECK3_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_MSB 0x0536
+#define regSURFACE_CHECK3_ADDRESS_MSB_BASE_IDX 2
+#define regVTG0_CONTROL 0x0537
+#define regVTG0_CONTROL_BASE_IDX 2
+#define regVTG1_CONTROL 0x0538
+#define regVTG1_CONTROL_BASE_IDX 2
+#define regVTG2_CONTROL 0x0539
+#define regVTG2_CONTROL_BASE_IDX 2
+#define regVTG3_CONTROL 0x053a
+#define regVTG3_CONTROL_BASE_IDX 2
+#define regDCHUBBUB_SOFT_RESET 0x053b
+#define regDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDCHUBBUB_CLOCK_CNTL 0x053c
+#define regDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regDCFCLK_CNTL 0x053d
+#define regDCFCLK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL 0x053e
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2 0x053f
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2_BASE_IDX 2
+#define regDCHUBBUB_VLINE_SNAPSHOT 0x0540
+#define regDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define regDCHUBBUB_CTRL_STATUS 0x0541
+#define regDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x0547
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x0548
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x0549
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define regFMON_CTRL 0x054a
+#define regFMON_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
+// base address: 0x0
+#define regDCHUBBUB_SDPIF_CFG0 0x046f
+#define regDCHUBBUB_SDPIF_CFG0_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG1 0x0470
+#define regDCHUBBUB_SDPIF_CFG1_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG2 0x0471
+#define regDCHUBBUB_SDPIF_CFG2_BASE_IDX 2
+#define regVM_REQUEST_PHYSICAL 0x0472
+#define regVM_REQUEST_PHYSICAL_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_0 0x0473
+#define regDCHUBBUB_FORCE_IO_STATUS_0_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_1 0x0474
+#define regDCHUBBUB_FORCE_IO_STATUS_1_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_BASE 0x0475
+#define regDCN_VM_FB_LOCATION_BASE_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_TOP 0x0476
+#define regDCN_VM_FB_LOCATION_TOP_BASE_IDX 2
+#define regDCN_VM_FB_OFFSET 0x0477
+#define regDCN_VM_FB_OFFSET_BASE_IDX 2
+#define regDCN_VM_AGP_BOT 0x0478
+#define regDCN_VM_AGP_BOT_BASE_IDX 2
+#define regDCN_VM_AGP_TOP 0x0479
+#define regDCN_VM_AGP_TOP_BASE_IDX 2
+#define regDCN_VM_AGP_BASE 0x047a
+#define regDCN_VM_AGP_BASE_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START 0x047b
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END 0x047c
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x047d
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL 0x047e
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC 0x047f
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL 0x0480
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL 0x0481
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL 0x0482
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL 0x0483
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL_BASE_IDX 2
+#define regSDPIF_REQUEST_RATE_LIMIT 0x0484
+#define regSDPIF_REQUEST_RATE_LIMIT_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL 0x0485
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS 0x0486
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_ret_path_dispdec
+// base address: 0x0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04af
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04b0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define regDCHUBBUB_CRC_CTRL 0x04b1
+#define regDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_R 0x04b2
+#define regDCHUBBUB_CRC0_VAL_R_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_G 0x04b3
+#define regDCHUBBUB_CRC0_VAL_G_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_B 0x04b4
+#define regDCHUBBUB_CRC0_VAL_B_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_A 0x04b5
+#define regDCHUBBUB_CRC0_VAL_A_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT_CNTL 0x04b6
+#define regDCHUBBUB_DCC_STAT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT0 0x04b7
+#define regDCHUBBUB_DCC_STAT0_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT1 0x04b8
+#define regDCHUBBUB_DCC_STAT1_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT2 0x04b9
+#define regDCHUBBUB_DCC_STAT2_BASE_IDX 2
+#define regDCHUBBUB_COMPBUF_CTRL 0x04ba
+#define regDCHUBBUB_COMPBUF_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET0_CTRL 0x04bb
+#define regDCHUBBUB_DET0_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET1_CTRL 0x04bc
+#define regDCHUBBUB_DET1_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET2_CTRL 0x04bd
+#define regDCHUBBUB_DET2_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET3_CTRL 0x04be
+#define regDCHUBBUB_DET3_CTRL_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL 0x04c0
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_1 0x04c1
+#define regCOMPBUF_MEM_PWR_CTRL_1_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_2 0x04c2
+#define regCOMPBUF_MEM_PWR_CTRL_2_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_STATUS 0x04c3
+#define regDCHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regCOMPBUF_RESERVED_SPACE 0x04c4
+#define regCOMPBUF_RESERVED_SPACE_BASE_IDX 2
+#define regDCHUBBUB_DEBUG_CTRL_0 0x04c5
+#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_R 0x04ca
+#define regDCHUBBUB_CRC1_VAL_R_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_G 0x04cb
+#define regDCHUBBUB_CRC1_VAL_G_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_B 0x04cc
+#define regDCHUBBUB_CRC1_VAL_B_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_A 0x04cd
+#define regDCHUBBUB_CRC1_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_vmrq_if_dispdec
+// base address: 0x0
+#define regDCN_VM_CONTEXT0_CNTL 0x0559
+#define regDCN_VM_CONTEXT0_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x055a
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x055b
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x055c
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x055d
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x055e
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x055f
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_CNTL 0x0560
+#define regDCN_VM_CONTEXT1_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0561
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0562
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0563
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0564
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0565
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0566
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_CNTL 0x0567
+#define regDCN_VM_CONTEXT2_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0568
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0569
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x056a
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x056b
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x056c
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x056d
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_CNTL 0x056e
+#define regDCN_VM_CONTEXT3_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x056f
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0570
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0571
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0572
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0573
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0574
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_CNTL 0x0575
+#define regDCN_VM_CONTEXT4_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0576
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0577
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0578
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0579
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x057a
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x057b
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_CNTL 0x057c
+#define regDCN_VM_CONTEXT5_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x057d
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x057e
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x057f
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0580
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0581
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0582
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_CNTL 0x0583
+#define regDCN_VM_CONTEXT6_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0584
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0585
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0586
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0587
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0588
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0589
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_CNTL 0x058a
+#define regDCN_VM_CONTEXT7_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x058b
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x058c
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x058d
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x058e
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x058f
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0590
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_CNTL 0x0591
+#define regDCN_VM_CONTEXT8_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0592
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0593
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0594
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0595
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0596
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0597
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_CNTL 0x0598
+#define regDCN_VM_CONTEXT9_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0599
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x059a
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x059b
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x059c
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x059d
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x059e
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_CNTL 0x059f
+#define regDCN_VM_CONTEXT10_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x05a0
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x05a1
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x05a2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x05a3
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x05a4
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x05a5
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_CNTL 0x05a6
+#define regDCN_VM_CONTEXT11_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x05a7
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x05a8
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x05a9
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x05aa
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x05ab
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x05ac
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_CNTL 0x05ad
+#define regDCN_VM_CONTEXT12_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x05ae
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x05af
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x05b0
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x05b1
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x05b2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x05b3
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_CNTL 0x05b4
+#define regDCN_VM_CONTEXT13_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x05b5
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x05b6
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x05b7
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x05b8
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x05b9
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x05ba
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_CNTL 0x05bb
+#define regDCN_VM_CONTEXT14_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x05bc
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x05bd
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x05be
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x05bf
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x05c0
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x05c1
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_CNTL 0x05c2
+#define regDCN_VM_CONTEXT15_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x05c3
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x05c4
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x05c5
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x05c6
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x05c7
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x05c8
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_MSB 0x05c9
+#define regDCN_VM_DEFAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_LSB 0x05ca
+#define regDCN_VM_DEFAULT_ADDR_LSB_BASE_IDX 2
+#define regDCN_VM_FAULT_CNTL 0x05cb
+#define regDCN_VM_FAULT_CNTL_BASE_IDX 2
+#define regDCN_VM_FAULT_STATUS 0x05cc
+#define regDCN_VM_FAULT_STATUS_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_MSB 0x05cd
+#define regDCN_VM_FAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_LSB 0x05ce
+#define regDCN_VM_FAULT_ADDR_LSB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_dchubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1534
+#define regDC_PERFMON6_PERFCOUNTER_CNTL 0x054d
+#define regDC_PERFMON6_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2 0x054e
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_STATE 0x054f
+#define regDC_PERFMON6_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL 0x0550
+#define regDC_PERFMON6_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL2 0x0551
+#define regDC_PERFMON6_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC 0x0552
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW 0x0553
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_HI 0x0554
+#define regDC_PERFMON6_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_LOW 0x0555
+#define regDC_PERFMON6_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define regHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define regHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define regHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define regHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_CNTL 0x05f3
+#define regHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP0_HUBP_CLK_CNTL 0x05f4
+#define regHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP0_DCHUBP_VMPG_CONFIG 0x05f5
+#define regHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_CONFIG 0x05f6
+#define regHUBP0_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_SUB_VP 0x05f7
+#define regHUBP0_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG_DB 0x05f8
+#define regHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG 0x05f9
+#define regHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fd
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fe
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MALL_STATUS 0x05ff
+#define regHUBP0_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ0_VMID_SETTINGS_0 0x0609
+#define regHUBPREQ0_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x061f
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE 0x0620
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0621
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0622
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0623
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0624
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0625
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0626
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0627
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCN_EXPANSION_MODE 0x0628
+#define regHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ0_DCN_TTU_QOS_WM 0x0629
+#define regHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062a
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062b
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x062c
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x062d
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x062e
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x062f
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0630
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0 0x0631
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1 0x0632
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL 0x0633
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0634
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0635
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL 0x0642
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_0 0x0643
+#define regHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_1 0x0644
+#define regHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ0_DST_DIMENSIONS 0x0645
+#define regHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ0_DST_AFTER_SCALER 0x0646
+#define regHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS 0x0647
+#define regHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS_C 0x0648
+#define regHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_0 0x0649
+#define regHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_1 0x064a
+#define regHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_2 0x064b
+#define regHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_3 0x064c
+#define regHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_4 0x064d
+#define regHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_0 0x064e
+#define regHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_1 0x064f
+#define regHUBPREQ0_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_2 0x0650
+#define regHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_0 0x0651
+#define regHUBPREQ0_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_1 0x0652
+#define regHUBPREQ0_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_2 0x0653
+#define regHUBPREQ0_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_3 0x0654
+#define regHUBPREQ0_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_4 0x0655
+#define regHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_5 0x0656
+#define regHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_6 0x0657
+#define regHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_7 0x0658
+#define regHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE 0x0659
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY 0x065a
+#define regHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ0_CURSOR_SETTINGS 0x065b
+#define regHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065c
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x065d
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x065e
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x065f
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_5 0x0662
+#define regHUBPREQ0_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_6 0x0663
+#define regHUBPREQ0_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_3 0x0664
+#define regHUBPREQ0_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_4 0x0665
+#define regHUBPREQ0_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_5 0x0666
+#define regHUBPREQ0_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_6 0x0667
+#define regHUBPREQ0_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_UCLK_PSTATE_FORCE 0x0668
+#define regHUBPREQ0_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0 0x0669
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1 0x066a
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2 0x066b
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define regHUBPRET0_HUBPRET_CONTROL 0x066c
+#define regHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066d
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066e
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066f
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x0670
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE0 0x0671
+#define regHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE1 0x0672
+#define regHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_INTERRUPT 0x0673
+#define regHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0674
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0675
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define regCURSOR0_0_CURSOR_CONTROL 0x0678
+#define regCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SIZE 0x067b
+#define regCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_POSITION 0x067c
+#define regCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define regCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define regCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_CNTL 0x0684
+#define regCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define regCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_STATUS 0x0686
+#define regCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define regCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define regCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a74
+#define regDC_PERFMON7_PERFCOUNTER_CNTL 0x069d
+#define regDC_PERFMON7_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2 0x069e
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_STATE 0x069f
+#define regDC_PERFMON7_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL 0x06a0
+#define regDC_PERFMON7_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL2 0x06a1
+#define regDC_PERFMON7_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC 0x06a2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW 0x06a3
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_HI 0x06a4
+#define regDC_PERFMON7_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_LOW 0x06a5
+#define regDC_PERFMON7_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define regHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define regHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define regHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define regHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_CNTL 0x06cf
+#define regHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP1_HUBP_CLK_CNTL 0x06d0
+#define regHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP1_DCHUBP_VMPG_CONFIG 0x06d1
+#define regHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_CONFIG 0x06d2
+#define regHUBP1_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_SUB_VP 0x06d3
+#define regHUBP1_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG_DB 0x06d4
+#define regHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG 0x06d5
+#define regHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d9
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06da
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MALL_STATUS 0x06db
+#define regHUBP1_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ1_VMID_SETTINGS_0 0x06e5
+#define regHUBPREQ1_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fb
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fc
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fd
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06fe
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x06ff
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0700
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0701
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0702
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0703
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCN_EXPANSION_MODE 0x0704
+#define regHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ1_DCN_TTU_QOS_WM 0x0705
+#define regHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x0706
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x0707
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x0708
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x0709
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070a
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070b
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x070c
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x070d
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x070e
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL 0x070f
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0710
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0711
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL 0x071e
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_0 0x071f
+#define regHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_1 0x0720
+#define regHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ1_DST_DIMENSIONS 0x0721
+#define regHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ1_DST_AFTER_SCALER 0x0722
+#define regHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS 0x0723
+#define regHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS_C 0x0724
+#define regHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_0 0x0725
+#define regHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_1 0x0726
+#define regHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_2 0x0727
+#define regHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_3 0x0728
+#define regHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_4 0x0729
+#define regHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_0 0x072a
+#define regHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_1 0x072b
+#define regHUBPREQ1_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_2 0x072c
+#define regHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_0 0x072d
+#define regHUBPREQ1_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_1 0x072e
+#define regHUBPREQ1_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_2 0x072f
+#define regHUBPREQ1_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_3 0x0730
+#define regHUBPREQ1_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_4 0x0731
+#define regHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_5 0x0732
+#define regHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_6 0x0733
+#define regHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_7 0x0734
+#define regHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0735
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY 0x0736
+#define regHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ1_CURSOR_SETTINGS 0x0737
+#define regHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x0738
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x0739
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073a
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073b
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_5 0x073e
+#define regHUBPREQ1_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_6 0x073f
+#define regHUBPREQ1_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_3 0x0740
+#define regHUBPREQ1_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_4 0x0741
+#define regHUBPREQ1_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_5 0x0742
+#define regHUBPREQ1_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_6 0x0743
+#define regHUBPREQ1_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_UCLK_PSTATE_FORCE 0x0744
+#define regHUBPREQ1_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0 0x0745
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1 0x0746
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2 0x0747
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define regHUBPRET1_HUBPRET_CONTROL 0x0748
+#define regHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0749
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x074a
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x074b
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074c
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE0 0x074d
+#define regHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE1 0x074e
+#define regHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_INTERRUPT 0x074f
+#define regHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE 0x0750
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS 0x0751
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define regCURSOR0_1_CURSOR_CONTROL 0x0754
+#define regCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SIZE 0x0757
+#define regCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_POSITION 0x0758
+#define regCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define regCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define regCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_CNTL 0x0760
+#define regCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define regCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_STATUS 0x0762
+#define regCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define regCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define regCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1de4
+#define regDC_PERFMON8_PERFCOUNTER_CNTL 0x0779
+#define regDC_PERFMON8_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2 0x077a
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_STATE 0x077b
+#define regDC_PERFMON8_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL 0x077c
+#define regDC_PERFMON8_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL2 0x077d
+#define regDC_PERFMON8_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC 0x077e
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW 0x077f
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_HI 0x0780
+#define regDC_PERFMON8_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_LOW 0x0781
+#define regDC_PERFMON8_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define regHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define regHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define regHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define regHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_CNTL 0x07ab
+#define regHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP2_HUBP_CLK_CNTL 0x07ac
+#define regHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP2_DCHUBP_VMPG_CONFIG 0x07ad
+#define regHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_CONFIG 0x07ae
+#define regHUBP2_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_SUB_VP 0x07af
+#define regHUBP2_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG_DB 0x07b0
+#define regHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG 0x07b1
+#define regHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b5
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b6
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MALL_STATUS 0x07b7
+#define regHUBP2_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ2_VMID_SETTINGS_0 0x07c1
+#define regHUBPREQ2_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d7
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d8
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07d9
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07da
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07db
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dc
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07dd
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07de
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07df
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCN_EXPANSION_MODE 0x07e0
+#define regHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ2_DCN_TTU_QOS_WM 0x07e1
+#define regHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e3
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e4
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e5
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07e6
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07e7
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07e8
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07e9
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ea
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL 0x07eb
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x07ec
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x07ed
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL 0x07fa
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_0 0x07fb
+#define regHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_1 0x07fc
+#define regHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ2_DST_DIMENSIONS 0x07fd
+#define regHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ2_DST_AFTER_SCALER 0x07fe
+#define regHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS 0x07ff
+#define regHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS_C 0x0800
+#define regHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_0 0x0801
+#define regHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_1 0x0802
+#define regHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_2 0x0803
+#define regHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_3 0x0804
+#define regHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_4 0x0805
+#define regHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_0 0x0806
+#define regHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_1 0x0807
+#define regHUBPREQ2_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_2 0x0808
+#define regHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_0 0x0809
+#define regHUBPREQ2_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_1 0x080a
+#define regHUBPREQ2_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_2 0x080b
+#define regHUBPREQ2_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_3 0x080c
+#define regHUBPREQ2_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_4 0x080d
+#define regHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_5 0x080e
+#define regHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_6 0x080f
+#define regHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_7 0x0810
+#define regHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0811
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY 0x0812
+#define regHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ2_CURSOR_SETTINGS 0x0813
+#define regHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0814
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0815
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0816
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x0817
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_5 0x081a
+#define regHUBPREQ2_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_6 0x081b
+#define regHUBPREQ2_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_3 0x081c
+#define regHUBPREQ2_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_4 0x081d
+#define regHUBPREQ2_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_5 0x081e
+#define regHUBPREQ2_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_6 0x081f
+#define regHUBPREQ2_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_UCLK_PSTATE_FORCE 0x0820
+#define regHUBPREQ2_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0 0x0821
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1 0x0822
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2 0x0823
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define regHUBPRET2_HUBPRET_CONTROL 0x0824
+#define regHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0825
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0826
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0827
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0828
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE0 0x0829
+#define regHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE1 0x082a
+#define regHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_INTERRUPT 0x082b
+#define regHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082c
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082d
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define regCURSOR0_2_CURSOR_CONTROL 0x0830
+#define regCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SIZE 0x0833
+#define regCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_POSITION 0x0834
+#define regCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define regCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define regCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_CNTL 0x083c
+#define regCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define regCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_STATUS 0x083e
+#define regCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define regCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define regCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2154
+#define regDC_PERFMON9_PERFCOUNTER_CNTL 0x0855
+#define regDC_PERFMON9_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2 0x0856
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_STATE 0x0857
+#define regDC_PERFMON9_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL 0x0858
+#define regDC_PERFMON9_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL2 0x0859
+#define regDC_PERFMON9_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC 0x085a
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW 0x085b
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_HI 0x085c
+#define regDC_PERFMON9_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_LOW 0x085d
+#define regDC_PERFMON9_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define regHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define regHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define regHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define regHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_CNTL 0x0887
+#define regHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP3_HUBP_CLK_CNTL 0x0888
+#define regHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP3_DCHUBP_VMPG_CONFIG 0x0889
+#define regHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_CONFIG 0x088a
+#define regHUBP3_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_SUB_VP 0x088b
+#define regHUBP3_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG_DB 0x088c
+#define regHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG 0x088d
+#define regHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0891
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0892
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MALL_STATUS 0x0893
+#define regHUBP3_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ3_VMID_SETTINGS_0 0x089d
+#define regHUBPREQ3_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b3
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b4
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b5
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b6
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b7
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b8
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08b9
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08ba
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bb
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCN_EXPANSION_MODE 0x08bc
+#define regHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ3_DCN_TTU_QOS_WM 0x08bd
+#define regHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08be
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08bf
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c0
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c1
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c3
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c4
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0 0x08c5
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1 0x08c6
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL 0x08c7
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x08c8
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x08c9
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL 0x08d6
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_0 0x08d7
+#define regHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_1 0x08d8
+#define regHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ3_DST_DIMENSIONS 0x08d9
+#define regHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ3_DST_AFTER_SCALER 0x08da
+#define regHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS 0x08db
+#define regHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS_C 0x08dc
+#define regHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_0 0x08dd
+#define regHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_1 0x08de
+#define regHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_2 0x08df
+#define regHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_3 0x08e0
+#define regHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_4 0x08e1
+#define regHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_0 0x08e2
+#define regHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_1 0x08e3
+#define regHUBPREQ3_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_2 0x08e4
+#define regHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_0 0x08e5
+#define regHUBPREQ3_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_1 0x08e6
+#define regHUBPREQ3_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_2 0x08e7
+#define regHUBPREQ3_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_3 0x08e8
+#define regHUBPREQ3_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_4 0x08e9
+#define regHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_5 0x08ea
+#define regHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_6 0x08eb
+#define regHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_7 0x08ec
+#define regHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08ed
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY 0x08ee
+#define regHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ3_CURSOR_SETTINGS 0x08ef
+#define regHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f0
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f1
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f3
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_5 0x08f6
+#define regHUBPREQ3_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_6 0x08f7
+#define regHUBPREQ3_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_3 0x08f8
+#define regHUBPREQ3_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_4 0x08f9
+#define regHUBPREQ3_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_5 0x08fa
+#define regHUBPREQ3_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_6 0x08fb
+#define regHUBPREQ3_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_UCLK_PSTATE_FORCE 0x08fc
+#define regHUBPREQ3_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0 0x08fd
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1 0x08fe
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2 0x08ff
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define regHUBPRET3_HUBPRET_CONTROL 0x0900
+#define regHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x0901
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0902
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0903
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0904
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE0 0x0905
+#define regHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE1 0x0906
+#define regHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_INTERRUPT 0x0907
+#define regHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0908
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0909
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define regCURSOR0_3_CURSOR_CONTROL 0x090c
+#define regCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SIZE 0x090f
+#define regCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_POSITION 0x0910
+#define regCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define regCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define regCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_CNTL 0x0918
+#define regCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define regCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_STATUS 0x091a
+#define regCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define regCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define regCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x24c4
+#define regDC_PERFMON10_PERFCOUNTER_CNTL 0x0931
+#define regDC_PERFMON10_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2 0x0932
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_STATE 0x0933
+#define regDC_PERFMON10_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL 0x0934
+#define regDC_PERFMON10_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL2 0x0935
+#define regDC_PERFMON10_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC 0x0936
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW 0x0937
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_HI 0x0938
+#define regDC_PERFMON10_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_LOW 0x0939
+#define regDC_PERFMON10_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define regCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define regCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define regCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define regCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define regCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define regCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define regCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define regCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define regCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define regCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEALPHA 0x0cde
+#define regCNVC_CFG0_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_MODE 0x0cdf
+#define regCNVC_CFG0_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C11_C12 0x0ce0
+#define regCNVC_CFG0_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C13_C14 0x0ce1
+#define regCNVC_CFG0_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C21_C22 0x0ce2
+#define regCNVC_CFG0_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C23_C24 0x0ce3
+#define regCNVC_CFG0_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C31_C32 0x0ce4
+#define regCNVC_CFG0_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C33_C34 0x0ce5
+#define regCNVC_CFG0_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12 0x0ce6
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14 0x0ce7
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22 0x0ce8
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24 0x0ce9
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32 0x0cea
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34 0x0ceb
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_CNVC_COEF_FORMAT 0x0cec
+#define regCNVC_CFG0_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEGAM 0x0ced
+#define regCNVC_CFG0_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG0_PRE_REALPHA 0x0cee
+#define regCNVC_CFG0_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define regCNVC_CUR0_CURSOR0_CONTROL 0x0cf1
+#define regCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR0 0x0cf2
+#define regCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR1 0x0cf3
+#define regCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0cf4
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cf9
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA 0x0cfa
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL0_SCL_MODE 0x0cfb
+#define regDSCL0_SCL_MODE_BASE_IDX 2
+#define regDSCL0_SCL_TAP_CONTROL 0x0cfc
+#define regDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_CONTROL 0x0cfd
+#define regDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_2TAP_CONTROL 0x0cfe
+#define regDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cff
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0d00
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT 0x0d01
+#define regDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0d02
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C 0x0d03
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0d04
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT 0x0d05
+#define regDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0d06
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0d07
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_C 0x0d08
+#define regDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0d09
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL0_SCL_BLACK_COLOR 0x0d0a
+#define regDSCL0_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL0_DSCL_UPDATE 0x0d0b
+#define regDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL0_DSCL_AUTOCAL 0x0d0c
+#define regDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0d0d
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0d0e
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL0_OTG_H_BLANK 0x0d0f
+#define regDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL0_OTG_V_BLANK 0x0d10
+#define regDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL0_RECOUT_START 0x0d11
+#define regDSCL0_RECOUT_START_BASE_IDX 2
+#define regDSCL0_RECOUT_SIZE 0x0d12
+#define regDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL0_MPC_SIZE 0x0d13
+#define regDSCL0_MPC_SIZE_BASE_IDX 2
+#define regDSCL0_LB_DATA_FORMAT 0x0d14
+#define regDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL0_LB_MEMORY_CTRL 0x0d15
+#define regDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL0_LB_V_COUNTER 0x0d16
+#define regDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_CTRL 0x0d17
+#define regDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_STATUS 0x0d18
+#define regDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL0_OBUF_CONTROL 0x0d19
+#define regDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL0_OBUF_MEM_PWR_CTRL 0x0d1a
+#define regDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define regCM0_CM_CONTROL 0x0d20
+#define regCM0_CM_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_CONTROL 0x0d21
+#define regCM0_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C11_C12 0x0d22
+#define regCM0_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C13_C14 0x0d23
+#define regCM0_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C21_C22 0x0d24
+#define regCM0_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C23_C24 0x0d25
+#define regCM0_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C31_C32 0x0d26
+#define regCM0_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C33_C34 0x0d27
+#define regCM0_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C11_C12 0x0d28
+#define regCM0_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C13_C14 0x0d29
+#define regCM0_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C21_C22 0x0d2a
+#define regCM0_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C23_C24 0x0d2b
+#define regCM0_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C31_C32 0x0d2c
+#define regCM0_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C33_C34 0x0d2d
+#define regCM0_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_CONTROL 0x0d2e
+#define regCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C11_C12 0x0d2f
+#define regCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C13_C14 0x0d30
+#define regCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C21_C22 0x0d31
+#define regCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C23_C24 0x0d32
+#define regCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C31_C32 0x0d33
+#define regCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C33_C34 0x0d34
+#define regCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d35
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d36
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d37
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d38
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d39
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d3a
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_BIAS_CR_R 0x0d3b
+#define regCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM0_CM_BIAS_Y_G_CB_B 0x0d3c
+#define regCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_CONTROL 0x0d3d
+#define regCM0_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_INDEX 0x0d3e
+#define regCM0_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_DATA 0x0d3f
+#define regCM0_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_CONTROL 0x0d40
+#define regCM0_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B 0x0d41
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G 0x0d42
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R 0x0d43
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0d44
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0d45
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0d46
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0d47
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0d48
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0d49
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B 0x0d4a
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B 0x0d4b
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G 0x0d4c
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G 0x0d4d
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R 0x0d4e
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R 0x0d4f
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B 0x0d50
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G 0x0d51
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R 0x0d52
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1 0x0d53
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3 0x0d54
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5 0x0d55
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7 0x0d56
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9 0x0d57
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11 0x0d58
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13 0x0d59
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15 0x0d5a
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17 0x0d5b
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19 0x0d5c
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21 0x0d5d
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23 0x0d5e
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25 0x0d5f
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27 0x0d60
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29 0x0d61
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31 0x0d62
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33 0x0d63
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B 0x0d64
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G 0x0d65
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R 0x0d66
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0d67
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0d68
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0d69
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0d6a
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0d6b
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0d6c
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B 0x0d6d
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B 0x0d6e
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G 0x0d6f
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G 0x0d70
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R 0x0d71
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R 0x0d72
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B 0x0d73
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G 0x0d74
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R 0x0d75
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1 0x0d76
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3 0x0d77
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5 0x0d78
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7 0x0d79
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9 0x0d7a
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11 0x0d7b
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13 0x0d7c
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15 0x0d7d
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17 0x0d7e
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19 0x0d7f
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21 0x0d80
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23 0x0d81
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25 0x0d82
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27 0x0d83
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29 0x0d84
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31 0x0d85
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33 0x0d86
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_HDR_MULT_COEF 0x0d87
+#define regCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_CTRL 0x0d88
+#define regCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_STATUS 0x0d89
+#define regCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM0_CM_DEALPHA 0x0d8b
+#define regCM0_CM_DEALPHA_BASE_IDX 2
+#define regCM0_CM_COEF_FORMAT 0x0d8c
+#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_R 0x0d8f
+#define regCM0_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_G 0x0d90
+#define regCM0_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_B 0x0d91
+#define regCM0_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_A 0x0d92
+#define regCM0_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define regDPP_TOP0_DPP_CONTROL 0x0cc5
+#define regDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define regDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define regDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define regDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3890
+#define regDC_PERFMON11_PERFCOUNTER_CNTL 0x0e24
+#define regDC_PERFMON11_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2 0x0e25
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_STATE 0x0e26
+#define regDC_PERFMON11_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL 0x0e27
+#define regDC_PERFMON11_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL2 0x0e28
+#define regDC_PERFMON11_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC 0x0e29
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW 0x0e2a
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_HI 0x0e2b
+#define regDC_PERFMON11_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_LOW 0x0e2c
+#define regDC_PERFMON11_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define regCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define regCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define regCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define regCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define regCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define regCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define regCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define regCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define regCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define regCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define regCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEALPHA 0x0e49
+#define regCNVC_CFG1_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_MODE 0x0e4a
+#define regCNVC_CFG1_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C11_C12 0x0e4b
+#define regCNVC_CFG1_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C13_C14 0x0e4c
+#define regCNVC_CFG1_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C21_C22 0x0e4d
+#define regCNVC_CFG1_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C23_C24 0x0e4e
+#define regCNVC_CFG1_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C31_C32 0x0e4f
+#define regCNVC_CFG1_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C33_C34 0x0e50
+#define regCNVC_CFG1_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12 0x0e51
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14 0x0e52
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22 0x0e53
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24 0x0e54
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32 0x0e55
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34 0x0e56
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_CNVC_COEF_FORMAT 0x0e57
+#define regCNVC_CFG1_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEGAM 0x0e58
+#define regCNVC_CFG1_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG1_PRE_REALPHA 0x0e59
+#define regCNVC_CFG1_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define regCNVC_CUR1_CURSOR0_CONTROL 0x0e5c
+#define regCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR0 0x0e5d
+#define regCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR1 0x0e5e
+#define regCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e5f
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e64
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e65
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL1_SCL_MODE 0x0e66
+#define regDSCL1_SCL_MODE_BASE_IDX 2
+#define regDSCL1_SCL_TAP_CONTROL 0x0e67
+#define regDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_CONTROL 0x0e68
+#define regDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_2TAP_CONTROL 0x0e69
+#define regDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e6a
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e6b
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT 0x0e6c
+#define regDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e6d
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e6e
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e6f
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT 0x0e70
+#define regDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e71
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e72
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_C 0x0e73
+#define regDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e74
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL1_SCL_BLACK_COLOR 0x0e75
+#define regDSCL1_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL1_DSCL_UPDATE 0x0e76
+#define regDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL1_DSCL_AUTOCAL 0x0e77
+#define regDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e78
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e79
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL1_OTG_H_BLANK 0x0e7a
+#define regDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL1_OTG_V_BLANK 0x0e7b
+#define regDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL1_RECOUT_START 0x0e7c
+#define regDSCL1_RECOUT_START_BASE_IDX 2
+#define regDSCL1_RECOUT_SIZE 0x0e7d
+#define regDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL1_MPC_SIZE 0x0e7e
+#define regDSCL1_MPC_SIZE_BASE_IDX 2
+#define regDSCL1_LB_DATA_FORMAT 0x0e7f
+#define regDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL1_LB_MEMORY_CTRL 0x0e80
+#define regDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL1_LB_V_COUNTER 0x0e81
+#define regDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_CTRL 0x0e82
+#define regDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_STATUS 0x0e83
+#define regDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL1_OBUF_CONTROL 0x0e84
+#define regDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL1_OBUF_MEM_PWR_CTRL 0x0e85
+#define regDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define regCM1_CM_CONTROL 0x0e8b
+#define regCM1_CM_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_CONTROL 0x0e8c
+#define regCM1_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C11_C12 0x0e8d
+#define regCM1_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C13_C14 0x0e8e
+#define regCM1_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C21_C22 0x0e8f
+#define regCM1_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C23_C24 0x0e90
+#define regCM1_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C31_C32 0x0e91
+#define regCM1_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C33_C34 0x0e92
+#define regCM1_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C11_C12 0x0e93
+#define regCM1_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C13_C14 0x0e94
+#define regCM1_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C21_C22 0x0e95
+#define regCM1_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C23_C24 0x0e96
+#define regCM1_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C31_C32 0x0e97
+#define regCM1_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C33_C34 0x0e98
+#define regCM1_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_CONTROL 0x0e99
+#define regCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C11_C12 0x0e9a
+#define regCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C13_C14 0x0e9b
+#define regCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C21_C22 0x0e9c
+#define regCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C23_C24 0x0e9d
+#define regCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C31_C32 0x0e9e
+#define regCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C33_C34 0x0e9f
+#define regCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12 0x0ea0
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14 0x0ea1
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22 0x0ea2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24 0x0ea3
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32 0x0ea4
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34 0x0ea5
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_BIAS_CR_R 0x0ea6
+#define regCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM1_CM_BIAS_Y_G_CB_B 0x0ea7
+#define regCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_CONTROL 0x0ea8
+#define regCM1_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_INDEX 0x0ea9
+#define regCM1_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_DATA 0x0eaa
+#define regCM1_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_CONTROL 0x0eab
+#define regCM1_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B 0x0eac
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G 0x0ead
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R 0x0eae
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0eaf
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0eb0
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0eb1
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0eb2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0eb3
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0eb4
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B 0x0eb5
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B 0x0eb6
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G 0x0eb7
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G 0x0eb8
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R 0x0eb9
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R 0x0eba
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B 0x0ebb
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G 0x0ebc
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R 0x0ebd
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1 0x0ebe
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3 0x0ebf
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5 0x0ec0
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7 0x0ec1
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9 0x0ec2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11 0x0ec3
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13 0x0ec4
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15 0x0ec5
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17 0x0ec6
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19 0x0ec7
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21 0x0ec8
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23 0x0ec9
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25 0x0eca
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27 0x0ecb
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29 0x0ecc
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31 0x0ecd
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33 0x0ece
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B 0x0ecf
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G 0x0ed0
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R 0x0ed1
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0ed2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0ed3
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0ed4
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0ed5
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0ed6
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0ed7
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B 0x0ed8
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B 0x0ed9
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G 0x0eda
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G 0x0edb
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R 0x0edc
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R 0x0edd
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B 0x0ede
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G 0x0edf
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R 0x0ee0
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1 0x0ee1
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3 0x0ee2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5 0x0ee3
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7 0x0ee4
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9 0x0ee5
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11 0x0ee6
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13 0x0ee7
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15 0x0ee8
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17 0x0ee9
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19 0x0eea
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21 0x0eeb
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23 0x0eec
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25 0x0eed
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27 0x0eee
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29 0x0eef
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31 0x0ef0
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33 0x0ef1
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_HDR_MULT_COEF 0x0ef2
+#define regCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_CTRL 0x0ef3
+#define regCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_STATUS 0x0ef4
+#define regCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM1_CM_DEALPHA 0x0ef6
+#define regCM1_CM_DEALPHA_BASE_IDX 2
+#define regCM1_CM_COEF_FORMAT 0x0ef7
+#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_R 0x0efa
+#define regCM1_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_G 0x0efb
+#define regCM1_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_B 0x0efc
+#define regCM1_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_A 0x0efd
+#define regCM1_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define regDPP_TOP1_DPP_CONTROL 0x0e30
+#define regDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define regDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define regDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define regDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3e3c
+#define regDC_PERFMON12_PERFCOUNTER_CNTL 0x0f8f
+#define regDC_PERFMON12_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2 0x0f90
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_STATE 0x0f91
+#define regDC_PERFMON12_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL 0x0f92
+#define regDC_PERFMON12_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL2 0x0f93
+#define regDC_PERFMON12_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC 0x0f94
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW 0x0f95
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_HI 0x0f96
+#define regDC_PERFMON12_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_LOW 0x0f97
+#define regDC_PERFMON12_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define regCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define regCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define regCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define regCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define regCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define regCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define regCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define regCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define regCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define regCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define regCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEALPHA 0x0fb4
+#define regCNVC_CFG2_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_MODE 0x0fb5
+#define regCNVC_CFG2_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C11_C12 0x0fb6
+#define regCNVC_CFG2_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C13_C14 0x0fb7
+#define regCNVC_CFG2_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C21_C22 0x0fb8
+#define regCNVC_CFG2_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C23_C24 0x0fb9
+#define regCNVC_CFG2_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C31_C32 0x0fba
+#define regCNVC_CFG2_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C33_C34 0x0fbb
+#define regCNVC_CFG2_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12 0x0fbc
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14 0x0fbd
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22 0x0fbe
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24 0x0fbf
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32 0x0fc0
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34 0x0fc1
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT 0x0fc2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEGAM 0x0fc3
+#define regCNVC_CFG2_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG2_PRE_REALPHA 0x0fc4
+#define regCNVC_CFG2_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define regCNVC_CUR2_CURSOR0_CONTROL 0x0fc7
+#define regCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR0 0x0fc8
+#define regCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR1 0x0fc9
+#define regCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fca
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fcf
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fd0
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL2_SCL_MODE 0x0fd1
+#define regDSCL2_SCL_MODE_BASE_IDX 2
+#define regDSCL2_SCL_TAP_CONTROL 0x0fd2
+#define regDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_CONTROL 0x0fd3
+#define regDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_2TAP_CONTROL 0x0fd4
+#define regDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fd5
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fd6
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT 0x0fd7
+#define regDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fd8
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fd9
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fda
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT 0x0fdb
+#define regDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fdc
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fdd
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_C 0x0fde
+#define regDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fdf
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL2_SCL_BLACK_COLOR 0x0fe0
+#define regDSCL2_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL2_DSCL_UPDATE 0x0fe1
+#define regDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL2_DSCL_AUTOCAL 0x0fe2
+#define regDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fe3
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fe4
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL2_OTG_H_BLANK 0x0fe5
+#define regDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL2_OTG_V_BLANK 0x0fe6
+#define regDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL2_RECOUT_START 0x0fe7
+#define regDSCL2_RECOUT_START_BASE_IDX 2
+#define regDSCL2_RECOUT_SIZE 0x0fe8
+#define regDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL2_MPC_SIZE 0x0fe9
+#define regDSCL2_MPC_SIZE_BASE_IDX 2
+#define regDSCL2_LB_DATA_FORMAT 0x0fea
+#define regDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL2_LB_MEMORY_CTRL 0x0feb
+#define regDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL2_LB_V_COUNTER 0x0fec
+#define regDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_CTRL 0x0fed
+#define regDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_STATUS 0x0fee
+#define regDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL2_OBUF_CONTROL 0x0fef
+#define regDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL2_OBUF_MEM_PWR_CTRL 0x0ff0
+#define regDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define regCM2_CM_CONTROL 0x0ff6
+#define regCM2_CM_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_CONTROL 0x0ff7
+#define regCM2_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C11_C12 0x0ff8
+#define regCM2_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C13_C14 0x0ff9
+#define regCM2_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C21_C22 0x0ffa
+#define regCM2_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C23_C24 0x0ffb
+#define regCM2_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C31_C32 0x0ffc
+#define regCM2_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C33_C34 0x0ffd
+#define regCM2_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C11_C12 0x0ffe
+#define regCM2_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C13_C14 0x0fff
+#define regCM2_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C21_C22 0x1000
+#define regCM2_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C23_C24 0x1001
+#define regCM2_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C31_C32 0x1002
+#define regCM2_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C33_C34 0x1003
+#define regCM2_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_CONTROL 0x1004
+#define regCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C11_C12 0x1005
+#define regCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C13_C14 0x1006
+#define regCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C21_C22 0x1007
+#define regCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C23_C24 0x1008
+#define regCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C31_C32 0x1009
+#define regCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C33_C34 0x100a
+#define regCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12 0x100b
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14 0x100c
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22 0x100d
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24 0x100e
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32 0x100f
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34 0x1010
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_BIAS_CR_R 0x1011
+#define regCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM2_CM_BIAS_Y_G_CB_B 0x1012
+#define regCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_CONTROL 0x1013
+#define regCM2_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_INDEX 0x1014
+#define regCM2_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_DATA 0x1015
+#define regCM2_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_CONTROL 0x1016
+#define regCM2_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B 0x1017
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G 0x1018
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R 0x1019
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x101a
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x101b
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x101c
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x101d
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x101e
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x101f
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B 0x1020
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B 0x1021
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G 0x1022
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G 0x1023
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R 0x1024
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R 0x1025
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B 0x1026
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G 0x1027
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R 0x1028
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1 0x1029
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3 0x102a
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5 0x102b
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7 0x102c
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9 0x102d
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11 0x102e
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13 0x102f
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15 0x1030
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17 0x1031
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19 0x1032
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21 0x1033
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23 0x1034
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25 0x1035
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27 0x1036
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29 0x1037
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31 0x1038
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33 0x1039
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B 0x103a
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G 0x103b
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R 0x103c
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x103d
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x103e
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x103f
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x1040
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x1041
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x1042
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B 0x1043
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B 0x1044
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G 0x1045
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G 0x1046
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R 0x1047
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R 0x1048
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B 0x1049
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G 0x104a
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R 0x104b
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1 0x104c
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3 0x104d
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5 0x104e
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7 0x104f
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9 0x1050
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11 0x1051
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13 0x1052
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15 0x1053
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17 0x1054
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19 0x1055
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21 0x1056
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23 0x1057
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25 0x1058
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27 0x1059
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29 0x105a
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31 0x105b
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33 0x105c
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_HDR_MULT_COEF 0x105d
+#define regCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_CTRL 0x105e
+#define regCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_STATUS 0x105f
+#define regCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM2_CM_DEALPHA 0x1061
+#define regCM2_CM_DEALPHA_BASE_IDX 2
+#define regCM2_CM_COEF_FORMAT 0x1062
+#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_R 0x1065
+#define regCM2_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_G 0x1066
+#define regCM2_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_B 0x1067
+#define regCM2_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_A 0x1068
+#define regCM2_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define regDPP_TOP2_DPP_CONTROL 0x0f9b
+#define regDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define regDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define regDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP2_HOST_READ_CONTROL 0x0fa0
+#define regDPP_TOP2_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x43e8
+#define regDC_PERFMON13_PERFCOUNTER_CNTL 0x10fa
+#define regDC_PERFMON13_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2 0x10fb
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_STATE 0x10fc
+#define regDC_PERFMON13_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL 0x10fd
+#define regDC_PERFMON13_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL2 0x10fe
+#define regDC_PERFMON13_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC 0x10ff
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW 0x1100
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_HI 0x1101
+#define regDC_PERFMON13_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_LOW 0x1102
+#define regDC_PERFMON13_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define regCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define regCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define regCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define regCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define regCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define regCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define regCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define regCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define regCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define regCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define regCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEALPHA 0x111f
+#define regCNVC_CFG3_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_MODE 0x1120
+#define regCNVC_CFG3_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C11_C12 0x1121
+#define regCNVC_CFG3_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C13_C14 0x1122
+#define regCNVC_CFG3_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C21_C22 0x1123
+#define regCNVC_CFG3_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C23_C24 0x1124
+#define regCNVC_CFG3_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C31_C32 0x1125
+#define regCNVC_CFG3_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C33_C34 0x1126
+#define regCNVC_CFG3_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12 0x1127
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14 0x1128
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22 0x1129
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24 0x112a
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32 0x112b
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34 0x112c
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_CNVC_COEF_FORMAT 0x112d
+#define regCNVC_CFG3_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEGAM 0x112e
+#define regCNVC_CFG3_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG3_PRE_REALPHA 0x112f
+#define regCNVC_CFG3_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define regCNVC_CUR3_CURSOR0_CONTROL 0x1132
+#define regCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR0 0x1133
+#define regCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR1 0x1134
+#define regCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1135
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT 0x113a
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA 0x113b
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL3_SCL_MODE 0x113c
+#define regDSCL3_SCL_MODE_BASE_IDX 2
+#define regDSCL3_SCL_TAP_CONTROL 0x113d
+#define regDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_CONTROL 0x113e
+#define regDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_2TAP_CONTROL 0x113f
+#define regDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1140
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1141
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT 0x1142
+#define regDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1143
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C 0x1144
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1145
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT 0x1146
+#define regDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1147
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1148
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_C 0x1149
+#define regDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x114a
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL3_SCL_BLACK_COLOR 0x114b
+#define regDSCL3_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL3_DSCL_UPDATE 0x114c
+#define regDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL3_DSCL_AUTOCAL 0x114d
+#define regDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x114e
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x114f
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL3_OTG_H_BLANK 0x1150
+#define regDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL3_OTG_V_BLANK 0x1151
+#define regDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL3_RECOUT_START 0x1152
+#define regDSCL3_RECOUT_START_BASE_IDX 2
+#define regDSCL3_RECOUT_SIZE 0x1153
+#define regDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL3_MPC_SIZE 0x1154
+#define regDSCL3_MPC_SIZE_BASE_IDX 2
+#define regDSCL3_LB_DATA_FORMAT 0x1155
+#define regDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL3_LB_MEMORY_CTRL 0x1156
+#define regDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL3_LB_V_COUNTER 0x1157
+#define regDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_CTRL 0x1158
+#define regDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_STATUS 0x1159
+#define regDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL3_OBUF_CONTROL 0x115a
+#define regDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL3_OBUF_MEM_PWR_CTRL 0x115b
+#define regDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define regCM3_CM_CONTROL 0x1161
+#define regCM3_CM_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_CONTROL 0x1162
+#define regCM3_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C11_C12 0x1163
+#define regCM3_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C13_C14 0x1164
+#define regCM3_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C21_C22 0x1165
+#define regCM3_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C23_C24 0x1166
+#define regCM3_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C31_C32 0x1167
+#define regCM3_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C33_C34 0x1168
+#define regCM3_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C11_C12 0x1169
+#define regCM3_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C13_C14 0x116a
+#define regCM3_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C21_C22 0x116b
+#define regCM3_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C23_C24 0x116c
+#define regCM3_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C31_C32 0x116d
+#define regCM3_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C33_C34 0x116e
+#define regCM3_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_CONTROL 0x116f
+#define regCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C11_C12 0x1170
+#define regCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C13_C14 0x1171
+#define regCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C21_C22 0x1172
+#define regCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C23_C24 0x1173
+#define regCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C31_C32 0x1174
+#define regCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C33_C34 0x1175
+#define regCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12 0x1176
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14 0x1177
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22 0x1178
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24 0x1179
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32 0x117a
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34 0x117b
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_BIAS_CR_R 0x117c
+#define regCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM3_CM_BIAS_Y_G_CB_B 0x117d
+#define regCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_CONTROL 0x117e
+#define regCM3_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_INDEX 0x117f
+#define regCM3_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_DATA 0x1180
+#define regCM3_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_CONTROL 0x1181
+#define regCM3_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B 0x1182
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G 0x1183
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R 0x1184
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x1185
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x1186
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x1187
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x1188
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x1189
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x118a
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B 0x118b
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B 0x118c
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G 0x118d
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G 0x118e
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R 0x118f
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R 0x1190
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B 0x1191
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G 0x1192
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R 0x1193
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1 0x1194
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3 0x1195
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5 0x1196
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7 0x1197
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9 0x1198
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11 0x1199
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13 0x119a
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15 0x119b
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17 0x119c
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19 0x119d
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21 0x119e
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23 0x119f
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25 0x11a0
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27 0x11a1
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29 0x11a2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31 0x11a3
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33 0x11a4
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B 0x11a5
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G 0x11a6
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R 0x11a7
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x11a8
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x11a9
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x11aa
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x11ab
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x11ac
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x11ad
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B 0x11ae
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B 0x11af
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G 0x11b0
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G 0x11b1
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R 0x11b2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R 0x11b3
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B 0x11b4
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G 0x11b5
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R 0x11b6
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1 0x11b7
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3 0x11b8
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5 0x11b9
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7 0x11ba
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9 0x11bb
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11 0x11bc
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13 0x11bd
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15 0x11be
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17 0x11bf
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19 0x11c0
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21 0x11c1
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23 0x11c2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25 0x11c3
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27 0x11c4
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29 0x11c5
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31 0x11c6
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33 0x11c7
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_HDR_MULT_COEF 0x11c8
+#define regCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_CTRL 0x11c9
+#define regCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_STATUS 0x11ca
+#define regCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM3_CM_DEALPHA 0x11cc
+#define regCM3_CM_DEALPHA_BASE_IDX 2
+#define regCM3_CM_COEF_FORMAT 0x11cd
+#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_R 0x11d0
+#define regCM3_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_G 0x11d1
+#define regCM3_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_B 0x11d2
+#define regCM3_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_A 0x11d3
+#define regCM3_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define regDPP_TOP3_DPP_CONTROL 0x1106
+#define regDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define regDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define regDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP3_HOST_READ_CONTROL 0x110b
+#define regDPP_TOP3_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x4994
+#define regDC_PERFMON14_PERFCOUNTER_CNTL 0x1265
+#define regDC_PERFMON14_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2 0x1266
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_STATE 0x1267
+#define regDC_PERFMON14_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL 0x1268
+#define regDC_PERFMON14_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL2 0x1269
+#define regDC_PERFMON14_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC 0x126a
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW 0x126b
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_HI 0x126c
+#define regDC_PERFMON14_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_LOW 0x126d
+#define regDC_PERFMON14_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define regMPCC0_MPCC_TOP_SEL 0x0000
+#define regMPCC0_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_SEL 0x0001
+#define regMPCC0_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_OPP_ID 0x0002
+#define regMPCC0_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC0_MPCC_CONTROL 0x0003
+#define regMPCC0_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_SM_CONTROL 0x0004
+#define regMPCC0_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL 0x0005
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_TOP_GAIN 0x0006
+#define regMPCC0_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE 0x0007
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x0008
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0009
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_BG_R_CR 0x000a
+#define regMPCC0_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC0_MPCC_BG_G_Y 0x000b
+#define regMPCC0_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC0_MPCC_BG_B_CB 0x000c
+#define regMPCC0_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC0_MPCC_MEM_PWR_CTRL 0x000d
+#define regMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC0_MPCC_STATUS 0x000e
+#define regMPCC0_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x54
+#define regMPCC1_MPCC_TOP_SEL 0x0015
+#define regMPCC1_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_SEL 0x0016
+#define regMPCC1_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_OPP_ID 0x0017
+#define regMPCC1_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC1_MPCC_CONTROL 0x0018
+#define regMPCC1_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_SM_CONTROL 0x0019
+#define regMPCC1_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL 0x001a
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_TOP_GAIN 0x001b
+#define regMPCC1_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE 0x001c
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x001d
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x001e
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_BG_R_CR 0x001f
+#define regMPCC1_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC1_MPCC_BG_G_Y 0x0020
+#define regMPCC1_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC1_MPCC_BG_B_CB 0x0021
+#define regMPCC1_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC1_MPCC_MEM_PWR_CTRL 0x0022
+#define regMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC1_MPCC_STATUS 0x0023
+#define regMPCC1_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xa8
+#define regMPCC2_MPCC_TOP_SEL 0x002a
+#define regMPCC2_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_SEL 0x002b
+#define regMPCC2_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_OPP_ID 0x002c
+#define regMPCC2_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC2_MPCC_CONTROL 0x002d
+#define regMPCC2_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_SM_CONTROL 0x002e
+#define regMPCC2_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL 0x002f
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_TOP_GAIN 0x0030
+#define regMPCC2_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE 0x0031
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x0032
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0033
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_BG_R_CR 0x0034
+#define regMPCC2_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC2_MPCC_BG_G_Y 0x0035
+#define regMPCC2_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC2_MPCC_BG_B_CB 0x0036
+#define regMPCC2_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC2_MPCC_MEM_PWR_CTRL 0x0037
+#define regMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC2_MPCC_STATUS 0x0038
+#define regMPCC2_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0xfc
+#define regMPCC3_MPCC_TOP_SEL 0x003f
+#define regMPCC3_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_SEL 0x0040
+#define regMPCC3_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_OPP_ID 0x0041
+#define regMPCC3_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC3_MPCC_CONTROL 0x0042
+#define regMPCC3_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_SM_CONTROL 0x0043
+#define regMPCC3_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL 0x0044
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_TOP_GAIN 0x0045
+#define regMPCC3_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE 0x0046
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x0047
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0048
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_BG_R_CR 0x0049
+#define regMPCC3_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC3_MPCC_BG_G_Y 0x004a
+#define regMPCC3_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC3_MPCC_BG_B_CB 0x004b
+#define regMPCC3_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC3_MPCC_MEM_PWR_CTRL 0x004c
+#define regMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC3_MPCC_STATUS 0x004d
+#define regMPCC3_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define regMPC_CLOCK_CONTROL 0x0398
+#define regMPC_CLOCK_CONTROL_BASE_IDX 3
+#define regMPC_SOFT_RESET 0x0399
+#define regMPC_SOFT_RESET_BASE_IDX 3
+#define regMPC_CRC_CTRL 0x039a
+#define regMPC_CRC_CTRL_BASE_IDX 3
+#define regMPC_CRC_SEL_CONTROL 0x039b
+#define regMPC_CRC_SEL_CONTROL_BASE_IDX 3
+#define regMPC_PERFMON_EVENT_CTRL 0x03a1
+#define regMPC_PERFMON_EVENT_CTRL_BASE_IDX 3
+#define regMPC_BYPASS_BG_AR 0x03a2
+#define regMPC_BYPASS_BG_AR_BASE_IDX 3
+#define regMPC_BYPASS_BG_GB 0x03a3
+#define regMPC_BYPASS_BG_GB_BASE_IDX 3
+#define regMPC_HOST_READ_CONTROL 0x03a4
+#define regMPC_HOST_READ_CONTROL_BASE_IDX 3
+#define regMPC_DPP_PENDING_STATUS 0x03a5
+#define regMPC_DPP_PENDING_STATUS_BASE_IDX 3
+#define regMPC_PENDING_STATUS_MISC 0x03a6
+#define regMPC_PENDING_STATUS_MISC_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0 0x03a7
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET0 0x03a8
+#define regADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET0 0x03a9
+#define regADR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET0 0x03aa
+#define regCFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET0 0x03ab
+#define regCUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1 0x03ac
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET1 0x03ad
+#define regADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET1 0x03ae
+#define regADR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET1 0x03af
+#define regCFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET1 0x03b0
+#define regCUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2 0x03b1
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET2 0x03b2
+#define regADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET2 0x03b3
+#define regADR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET2 0x03b4
+#define regCFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET2 0x03b5
+#define regCUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3 0x03b6
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET3 0x03b7
+#define regADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET3 0x03b8
+#define regADR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET3 0x03b9
+#define regCFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET3 0x03ba
+#define regCUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regMPC_CRC_RESULT_A 0x03c0
+#define regMPC_CRC_RESULT_A_BASE_IDX 3
+#define regMPC_CRC_RESULT_R 0x03c1
+#define regMPC_CRC_RESULT_R_BASE_IDX 3
+#define regMPC_CRC_RESULT_G 0x03c2
+#define regMPC_CRC_RESULT_G_BASE_IDX 3
+#define regMPC_CRC_RESULT_B 0x03c3
+#define regMPC_CRC_RESULT_B_BASE_IDX 3
+#define regMPC_DWB0_MUX 0x03c6
+#define regMPC_DWB0_MUX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x17e1c
+#define regDC_PERFMON15_PERFCOUNTER_CNTL 0x0447
+#define regDC_PERFMON15_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2 0x0448
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_STATE 0x0449
+#define regDC_PERFMON15_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL 0x044a
+#define regDC_PERFMON15_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL2 0x044b
+#define regDC_PERFMON15_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC 0x044c
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW 0x044d
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_HI 0x044e
+#define regDC_PERFMON15_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_LOW 0x044f
+#define regDC_PERFMON15_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL 0x00a8
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x00a9
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x00aa
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL 0x00ab
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x00ac
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x00ad
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x00ae
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x00af
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x00b0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x00b1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x00b2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x00b3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x00b4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x00b5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x00b6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x00b7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x00b8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x00b9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x00ba
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B 0x00bb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G 0x00bc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R 0x00bd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x00be
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x00bf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x00c0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x00c1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x00c2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x00c3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x00c4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x00c5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x00c6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x00c7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x00c8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x00c9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x00ca
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x00cb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x00cc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x00cd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x00ce
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x00cf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x00d0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x00d1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x00d2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x00d3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x00d4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x00d5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x00d6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x00d7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x00d8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x00d9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x00da
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x00db
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x00dc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x00dd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B 0x00de
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G 0x00df
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R 0x00e0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x00e1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x00e2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x00e3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x00e4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x00e5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x00e6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x00e7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x00e8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x00e9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x00ea
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x00eb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x00ec
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x00ed
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x00ee
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x00ef
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x00f0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x00f1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT 0x00f2
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE 0x00f3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A 0x00f4
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A 0x00f5
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A 0x00f6
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A 0x00f7
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A 0x00f8
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A 0x00f9
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B 0x00fa
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B 0x00fb
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B 0x00fc
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B 0x00fd
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B 0x00fe
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B 0x00ff
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x178
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL 0x0106
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x0107
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x0108
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL 0x0109
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x010a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x010b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x010c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x010d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x010e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x010f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x0110
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x0111
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0112
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x0113
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x0114
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x0115
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x0116
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x0117
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x0118
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B 0x0119
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G 0x011a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R 0x011b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x011c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x011d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x011e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x011f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x0120
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x0121
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x0122
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x0123
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x0124
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x0125
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x0126
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x0127
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x0128
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x0129
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x012a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x012b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x012c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x012d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x012e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x012f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x0130
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x0131
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0132
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0133
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0134
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0135
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x0136
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x0137
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x0138
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x0139
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x013a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x013b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B 0x013c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G 0x013d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R 0x013e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x013f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x0140
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x0141
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x0142
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x0143
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x0144
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x0145
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x0146
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x0147
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x0148
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x0149
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x014a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x014b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x014c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x014d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x014e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x014f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT 0x0150
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE 0x0151
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A 0x0152
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A 0x0153
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A 0x0154
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A 0x0155
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A 0x0156
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A 0x0157
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B 0x0158
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B 0x0159
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B 0x015a
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B 0x015b
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B 0x015c
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B 0x015d
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x2f0
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL 0x0164
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x0165
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x0166
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL 0x0167
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x0168
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x0169
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x016a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x016b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x016c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x016d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x016e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x016f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0170
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x0171
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x0172
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x0173
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x0174
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x0175
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x0176
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B 0x0177
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G 0x0178
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R 0x0179
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x017a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x017b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x017c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x017d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x017e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x017f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x0180
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x0181
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x0182
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x0183
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x0184
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x0185
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x0186
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x0187
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x0188
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x0189
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x018a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x018b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x018c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x018d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x018e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x018f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0190
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0191
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0192
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0193
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x0194
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x0195
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x0196
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x0197
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x0198
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x0199
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B 0x019a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G 0x019b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R 0x019c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x019d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x019e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x019f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x01a0
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x01a1
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x01a2
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x01a3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x01a4
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x01a5
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x01a6
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x01a7
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x01a8
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x01a9
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x01aa
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x01ab
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x01ac
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x01ad
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT 0x01ae
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE 0x01af
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A 0x01b0
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A 0x01b1
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A 0x01b2
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A 0x01b3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A 0x01b4
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A 0x01b5
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B 0x01b6
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B 0x01b7
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B 0x01b8
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B 0x01b9
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B 0x01ba
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B 0x01bb
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x468
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL 0x01c2
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x01c3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x01c4
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL 0x01c5
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x01c6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x01c7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x01c8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x01c9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x01ca
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x01cb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x01cc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x01cd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x01ce
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x01cf
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x01d0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x01d1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x01d2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x01d3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x01d4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B 0x01d5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G 0x01d6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R 0x01d7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x01d8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x01d9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x01da
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x01db
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x01dc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x01dd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x01de
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x01df
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x01e0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x01e1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x01e2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x01e3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x01e4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x01e5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x01e6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x01e7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x01e8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x01e9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x01ea
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x01eb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x01ec
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x01ed
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x01ee
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x01ef
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x01f0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x01f1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x01f2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x01f3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x01f4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x01f5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x01f6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x01f7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B 0x01f8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G 0x01f9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R 0x01fa
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x01fb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x01fc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x01fd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x01fe
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x01ff
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x0200
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x0201
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x0202
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x0203
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x0204
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x0205
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x0206
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x0207
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x0208
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x0209
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x020a
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x020b
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT 0x020c
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE 0x020d
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A 0x020e
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A 0x020f
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A 0x0210
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A 0x0211
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A 0x0212
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A 0x0213
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B 0x0214
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B 0x0215
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B 0x0216
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B 0x0217
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B 0x0218
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B 0x0219
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm0_dispdec
+// base address: 0x0
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL 0x0453
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R 0x0454
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G 0x0455
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B 0x0456
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R 0x0457
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B 0x0458
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX 0x0459
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA 0x045a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x045b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x045c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x045d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x045e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x045f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0460
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0461
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0462
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0463
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0464
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0465
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0466
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0467
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0468
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0469
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x046a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x046b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x046c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x046d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x046e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x046f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0470
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0471
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0472
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0473
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0474
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0475
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0476
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0477
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0478
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0479
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x047a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x047b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x047c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x047d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x047e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x047f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0480
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0481
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0482
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0483
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0484
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0485
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0486
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0487
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0488
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0489
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE 0x048a
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX 0x048b
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA 0x048c
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT 0x048d
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x048e
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x048f
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0490
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0491
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0492
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL 0x0493
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX 0x0494
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA 0x0495
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL 0x0496
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0497
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0498
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0499
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x049a
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x049b
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x049c
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x049d
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x049e
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x049f
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x04a0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x04a1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x04a2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x04a3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x04a4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x04a5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x04a6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x04a7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x04a8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x04a9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x04aa
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x04ab
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x04ac
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x04ad
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x04ae
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x04af
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x04b0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x04b1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x04b2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x04b3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x04b4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x04b5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x04b6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x04b7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x04b8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x04b9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x04ba
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x04bb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x04bc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x04bd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x04be
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x04bf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x04c0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x04c1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x04c2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x04c3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x04c4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x04c5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x04c6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x04c7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x04c8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x04c9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x04ca
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x04cb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x04cc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x04cd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x04ce
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x04cf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x04d0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x04d1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x04d2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x04d3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x04d4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x04d5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x04d6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x04d7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x04d8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x04d9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x04da
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x04db
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x04dc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL 0x04dd
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm1_dispdec
+// base address: 0x240
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL 0x04e3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R 0x04e4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G 0x04e5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B 0x04e6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R 0x04e7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B 0x04e8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX 0x04e9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA 0x04ea
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x04eb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x04ec
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x04ed
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x04ee
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x04ef
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x04f0
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x04f1
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x04f2
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x04f3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x04f4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x04f5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x04f6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x04f7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x04f8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x04f9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x04fa
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x04fb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x04fc
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x04fd
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x04fe
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x04ff
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0500
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0501
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0502
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0503
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0504
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0505
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0506
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0507
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0508
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0509
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x050a
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x050b
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x050c
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x050d
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x050e
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x050f
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0510
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0511
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0512
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0513
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0514
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0515
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0516
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0517
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0518
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0519
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE 0x051a
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX 0x051b
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA 0x051c
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT 0x051d
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x051e
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x051f
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0520
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0521
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0522
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL 0x0523
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX 0x0524
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA 0x0525
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL 0x0526
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0527
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0528
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0529
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x052a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x052b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x052c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x052d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x052e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x052f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0530
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0531
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0532
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0533
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0534
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0535
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0536
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0537
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0538
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0539
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x053a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x053b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x053c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x053d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x053e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x053f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0540
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0541
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0542
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0543
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0544
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0545
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0546
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0547
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0548
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0549
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x054a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x054b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x054c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x054d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x054e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x054f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0550
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0551
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0552
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0553
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0554
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0555
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0556
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0557
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0558
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0559
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x055a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x055b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x055c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x055d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x055e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x055f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0560
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0561
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0562
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0563
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0564
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0565
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0566
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0567
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0568
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0569
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x056a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x056b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x056c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL 0x056d
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm2_dispdec
+// base address: 0x480
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL 0x0573
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R 0x0574
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G 0x0575
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B 0x0576
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R 0x0577
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B 0x0578
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX 0x0579
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA 0x057a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x057b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x057c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x057d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x057e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x057f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0580
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0581
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0582
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0583
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0584
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0585
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0586
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0587
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0588
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0589
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x058a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x058b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x058c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x058d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x058e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x058f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0590
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0591
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0592
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0593
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0594
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0595
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0596
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0597
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0598
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0599
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x059a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x059b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x059c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x059d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x059e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x059f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x05a0
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x05a1
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x05a2
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x05a3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x05a4
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x05a5
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x05a6
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x05a7
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x05a8
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x05a9
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE 0x05aa
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX 0x05ab
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA 0x05ac
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT 0x05ad
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x05ae
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x05af
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x05b0
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x05b1
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x05b2
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL 0x05b3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX 0x05b4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA 0x05b5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL 0x05b6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x05b7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x05b8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x05b9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x05ba
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x05bb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x05bc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x05bd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x05be
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x05bf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x05c0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x05c1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x05c2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x05c3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x05c4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x05c5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x05c6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x05c7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x05c8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x05c9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x05ca
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x05cb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x05cc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x05cd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x05ce
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x05cf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x05d0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x05d1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x05d2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x05d3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x05d4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x05d5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x05d6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x05d7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x05d8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x05d9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x05da
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x05db
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x05dc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x05dd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x05de
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x05df
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x05e0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x05e1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x05e2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x05e3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x05e4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x05e5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x05e6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x05e7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x05e8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x05e9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x05ea
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x05eb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x05ec
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x05ed
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x05ee
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x05ef
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x05f0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x05f1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x05f2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x05f3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x05f4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x05f5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x05f6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x05f7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x05f8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x05f9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x05fa
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x05fb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x05fc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL 0x05fd
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm3_dispdec
+// base address: 0x6c0
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL 0x0603
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R 0x0604
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G 0x0605
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B 0x0606
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R 0x0607
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B 0x0608
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX 0x0609
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA 0x060a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x060b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x060c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x060d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x060e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x060f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0610
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0611
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0612
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0613
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0614
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0615
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0616
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0617
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0618
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0619
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x061a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x061b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x061c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x061d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x061e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x061f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0620
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0621
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0622
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0623
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0624
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0625
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0626
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0627
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0628
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0629
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x062a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x062b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x062c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x062d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x062e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x062f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0630
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0631
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0632
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0633
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0634
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0635
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0636
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0637
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0638
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0639
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE 0x063a
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX 0x063b
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA 0x063c
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT 0x063d
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x063e
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x063f
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0640
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0641
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0642
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL 0x0643
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX 0x0644
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA 0x0645
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL 0x0646
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0647
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0648
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0649
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x064a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x064b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x064c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x064d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x064e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x064f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0650
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0651
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0652
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0653
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0654
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0655
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0656
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0657
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0658
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0659
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x065a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x065b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x065c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x065d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x065e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x065f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0660
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0661
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0662
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0663
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0664
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0665
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0666
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0667
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0668
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0669
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x066a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x066b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x066c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x066d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x066e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x066f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0670
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0671
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0672
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0673
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0674
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0675
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0676
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0677
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0678
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0679
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x067a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x067b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x067c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x067d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x067e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x067f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0680
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0681
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0682
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0683
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0684
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0685
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0686
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0687
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0688
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0689
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x068a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x068b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x068c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL 0x068d
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define regMPC_OUT0_MUX 0x03d8
+#define regMPC_OUT0_MUX_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CONTROL 0x03d9
+#define regMPC_OUT0_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_G_Y 0x03da
+#define regMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_B_CB 0x03db
+#define regMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT1_MUX 0x03dc
+#define regMPC_OUT1_MUX_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CONTROL 0x03dd
+#define regMPC_OUT1_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_G_Y 0x03de
+#define regMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_B_CB 0x03df
+#define regMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT2_MUX 0x03e0
+#define regMPC_OUT2_MUX_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CONTROL 0x03e1
+#define regMPC_OUT2_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_G_Y 0x03e2
+#define regMPC_OUT2_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB 0x03e3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT3_MUX 0x03e4
+#define regMPC_OUT3_MUX_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CONTROL 0x03e5
+#define regMPC_OUT3_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_G_Y 0x03e6
+#define regMPC_OUT3_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_B_CB 0x03e7
+#define regMPC_OUT3_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT_CSC_COEF_FORMAT 0x03f0
+#define regMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 3
+#define regMPC_OUT0_CSC_MODE 0x03f1
+#define regMPC_OUT0_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_A 0x03f2
+#define regMPC_OUT0_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_A 0x03f3
+#define regMPC_OUT0_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_A 0x03f4
+#define regMPC_OUT0_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_A 0x03f5
+#define regMPC_OUT0_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_A 0x03f6
+#define regMPC_OUT0_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_A 0x03f7
+#define regMPC_OUT0_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_B 0x03f8
+#define regMPC_OUT0_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_B 0x03f9
+#define regMPC_OUT0_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_B 0x03fa
+#define regMPC_OUT0_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_B 0x03fb
+#define regMPC_OUT0_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_B 0x03fc
+#define regMPC_OUT0_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_B 0x03fd
+#define regMPC_OUT0_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_MODE 0x03fe
+#define regMPC_OUT1_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_A 0x03ff
+#define regMPC_OUT1_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_A 0x0400
+#define regMPC_OUT1_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_A 0x0401
+#define regMPC_OUT1_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_A 0x0402
+#define regMPC_OUT1_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_A 0x0403
+#define regMPC_OUT1_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_A 0x0404
+#define regMPC_OUT1_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_B 0x0405
+#define regMPC_OUT1_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_B 0x0406
+#define regMPC_OUT1_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_B 0x0407
+#define regMPC_OUT1_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_B 0x0408
+#define regMPC_OUT1_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_B 0x0409
+#define regMPC_OUT1_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_B 0x040a
+#define regMPC_OUT1_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_MODE 0x040b
+#define regMPC_OUT2_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_A 0x040c
+#define regMPC_OUT2_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_A 0x040d
+#define regMPC_OUT2_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_A 0x040e
+#define regMPC_OUT2_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_A 0x040f
+#define regMPC_OUT2_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_A 0x0410
+#define regMPC_OUT2_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_A 0x0411
+#define regMPC_OUT2_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_B 0x0412
+#define regMPC_OUT2_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_B 0x0413
+#define regMPC_OUT2_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_B 0x0414
+#define regMPC_OUT2_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_B 0x0415
+#define regMPC_OUT2_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_B 0x0416
+#define regMPC_OUT2_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_B 0x0417
+#define regMPC_OUT2_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_MODE 0x0418
+#define regMPC_OUT3_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_A 0x0419
+#define regMPC_OUT3_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_A 0x041a
+#define regMPC_OUT3_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_A 0x041b
+#define regMPC_OUT3_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_A 0x041c
+#define regMPC_OUT3_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_A 0x041d
+#define regMPC_OUT3_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_A 0x041e
+#define regMPC_OUT3_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_B 0x041f
+#define regMPC_OUT3_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_B 0x0420
+#define regMPC_OUT3_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_B 0x0421
+#define regMPC_OUT3_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_B 0x0422
+#define regMPC_OUT3_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_B 0x0423
+#define regMPC_OUT3_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_B 0x0424
+#define regMPC_OUT3_CSC_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+// base address: 0x0
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0e7a
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_USER_LEVEL 0x0e7b
+#define regABM0_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL 0x0e7c
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL 0x0e7d
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE 0x0e7e
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0e7f
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_ABM_CNTL 0x0e80
+#define regABM0_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0e81
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_BL1_PWM_GRP2_REG_LOCK 0x0e82
+#define regABM0_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM0_DC_ABM1_CNTL 0x0e83
+#define regABM0_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL 0x0e84
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0e85
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0e86
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0e87
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0e88
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0e89
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_12 0x0e8a
+#define regABM0_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_34 0x0e8b
+#define regABM0_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_CNTL_MISC 0x0e8c
+#define regABM0_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0e8e
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_MISC_CTRL 0x0e8f
+#define regABM0_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA 0x0e90
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA 0x0e91
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0e92
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT 0x0e93
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0e94
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0e95
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0e96
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE 0x0e97
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE 0x0e98
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0e99
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0e9a
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0e9b
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0e9c
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0e9d
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_1 0x0e9e
+#define regABM0_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_2 0x0e9f
+#define regABM0_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_3 0x0ea0
+#define regABM0_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_4 0x0ea1
+#define regABM0_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_5 0x0ea2
+#define regABM0_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_6 0x0ea3
+#define regABM0_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_7 0x0ea4
+#define regABM0_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_8 0x0ea5
+#define regABM0_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_9 0x0ea6
+#define regABM0_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_10 0x0ea7
+#define regABM0_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_11 0x0ea8
+#define regABM0_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_12 0x0ea9
+#define regABM0_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_13 0x0eaa
+#define regABM0_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_14 0x0eab
+#define regABM0_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_15 0x0eac
+#define regABM0_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_16 0x0ead
+#define regABM0_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_17 0x0eae
+#define regABM0_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_18 0x0eaf
+#define regABM0_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_19 0x0eb0
+#define regABM0_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_20 0x0eb1
+#define regABM0_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_21 0x0eb2
+#define regABM0_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_22 0x0eb3
+#define regABM0_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_23 0x0eb4
+#define regABM0_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_24 0x0eb5
+#define regABM0_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM0_DC_ABM1_BL_MASTER_LOCK 0x0eb6
+#define regABM0_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm1_dispdec
+// base address: 0x104
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0ebb
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_USER_LEVEL 0x0ebc
+#define regABM1_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL 0x0ebd
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL 0x0ebe
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE 0x0ebf
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0ec0
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_ABM_CNTL 0x0ec1
+#define regABM1_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0ec2
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK 0x0ec3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM1_DC_ABM1_CNTL 0x0ec4
+#define regABM1_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL 0x0ec5
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0ec6
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0ec7
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0ec8
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0ec9
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0eca
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_12 0x0ecb
+#define regABM1_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_34 0x0ecc
+#define regABM1_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_CNTL_MISC 0x0ecd
+#define regABM1_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0ecf
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_MISC_CTRL 0x0ed0
+#define regABM1_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA 0x0ed1
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA 0x0ed2
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0ed3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT 0x0ed4
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0ed5
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0ed6
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0ed7
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE 0x0ed8
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE 0x0ed9
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0eda
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0edb
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0edc
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0edd
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0ede
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_1 0x0edf
+#define regABM1_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_2 0x0ee0
+#define regABM1_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_3 0x0ee1
+#define regABM1_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_4 0x0ee2
+#define regABM1_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_5 0x0ee3
+#define regABM1_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_6 0x0ee4
+#define regABM1_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_7 0x0ee5
+#define regABM1_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_8 0x0ee6
+#define regABM1_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_9 0x0ee7
+#define regABM1_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_10 0x0ee8
+#define regABM1_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_11 0x0ee9
+#define regABM1_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_12 0x0eea
+#define regABM1_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_13 0x0eeb
+#define regABM1_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_14 0x0eec
+#define regABM1_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_15 0x0eed
+#define regABM1_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_16 0x0eee
+#define regABM1_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_17 0x0eef
+#define regABM1_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_18 0x0ef0
+#define regABM1_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_19 0x0ef1
+#define regABM1_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_20 0x0ef2
+#define regABM1_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_21 0x0ef3
+#define regABM1_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_22 0x0ef4
+#define regABM1_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_23 0x0ef5
+#define regABM1_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_24 0x0ef6
+#define regABM1_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM1_DC_ABM1_BL_MASTER_LOCK 0x0ef7
+#define regABM1_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm2_dispdec
+// base address: 0x208
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0efc
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_USER_LEVEL 0x0efd
+#define regABM2_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL 0x0efe
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL 0x0eff
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE 0x0f00
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f01
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_ABM_CNTL 0x0f02
+#define regABM2_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f03
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_BL1_PWM_GRP2_REG_LOCK 0x0f04
+#define regABM2_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM2_DC_ABM1_CNTL 0x0f05
+#define regABM2_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL 0x0f06
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f07
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f08
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f09
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f0a
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f0b
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_12 0x0f0c
+#define regABM2_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_34 0x0f0d
+#define regABM2_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_CNTL_MISC 0x0f0e
+#define regABM2_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f10
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_MISC_CTRL 0x0f11
+#define regABM2_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA 0x0f12
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA 0x0f13
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f14
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT 0x0f15
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f16
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f17
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f18
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE 0x0f19
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE 0x0f1a
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f1b
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f1c
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f1d
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f1e
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f1f
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_1 0x0f20
+#define regABM2_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_2 0x0f21
+#define regABM2_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_3 0x0f22
+#define regABM2_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_4 0x0f23
+#define regABM2_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_5 0x0f24
+#define regABM2_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_6 0x0f25
+#define regABM2_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_7 0x0f26
+#define regABM2_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_8 0x0f27
+#define regABM2_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_9 0x0f28
+#define regABM2_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_10 0x0f29
+#define regABM2_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_11 0x0f2a
+#define regABM2_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_12 0x0f2b
+#define regABM2_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_13 0x0f2c
+#define regABM2_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_14 0x0f2d
+#define regABM2_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_15 0x0f2e
+#define regABM2_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_16 0x0f2f
+#define regABM2_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_17 0x0f30
+#define regABM2_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_18 0x0f31
+#define regABM2_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_19 0x0f32
+#define regABM2_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_20 0x0f33
+#define regABM2_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_21 0x0f34
+#define regABM2_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_22 0x0f35
+#define regABM2_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_23 0x0f36
+#define regABM2_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_24 0x0f37
+#define regABM2_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM2_DC_ABM1_BL_MASTER_LOCK 0x0f38
+#define regABM2_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm3_dispdec
+// base address: 0x30c
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0f3d
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_USER_LEVEL 0x0f3e
+#define regABM3_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL 0x0f3f
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL 0x0f40
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE 0x0f41
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f42
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_ABM_CNTL 0x0f43
+#define regABM3_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f44
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_BL1_PWM_GRP2_REG_LOCK 0x0f45
+#define regABM3_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM3_DC_ABM1_CNTL 0x0f46
+#define regABM3_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL 0x0f47
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f48
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f49
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f4a
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f4b
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f4c
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_12 0x0f4d
+#define regABM3_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_34 0x0f4e
+#define regABM3_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_CNTL_MISC 0x0f4f
+#define regABM3_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f51
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_MISC_CTRL 0x0f52
+#define regABM3_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA 0x0f53
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA 0x0f54
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f55
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT 0x0f56
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f57
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f58
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f59
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE 0x0f5a
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE 0x0f5b
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f5c
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f5d
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f5e
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f5f
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f60
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_1 0x0f61
+#define regABM3_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_2 0x0f62
+#define regABM3_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_3 0x0f63
+#define regABM3_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_4 0x0f64
+#define regABM3_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_5 0x0f65
+#define regABM3_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_6 0x0f66
+#define regABM3_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_7 0x0f67
+#define regABM3_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_8 0x0f68
+#define regABM3_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_9 0x0f69
+#define regABM3_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_10 0x0f6a
+#define regABM3_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_11 0x0f6b
+#define regABM3_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_12 0x0f6c
+#define regABM3_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_13 0x0f6d
+#define regABM3_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_14 0x0f6e
+#define regABM3_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_15 0x0f6f
+#define regABM3_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_16 0x0f70
+#define regABM3_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_17 0x0f71
+#define regABM3_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_18 0x0f72
+#define regABM3_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_19 0x0f73
+#define regABM3_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_20 0x0f74
+#define regABM3_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_21 0x0f75
+#define regABM3_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_22 0x0f76
+#define regABM3_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_23 0x0f77
+#define regABM3_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_24 0x0f78
+#define regABM3_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM3_DC_ABM1_BL_MASTER_LOCK 0x0f79
+#define regABM3_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define regDPG0_DPG_CONTROL 0x1854
+#define regDPG0_DPG_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_RAMP_CONTROL 0x1855
+#define regDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_DIMENSIONS 0x1856
+#define regDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_R_CR 0x1857
+#define regDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_G_Y 0x1858
+#define regDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_B_CB 0x1859
+#define regDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define regDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG0_DPG_STATUS 0x185b
+#define regDPG0_DPG_STATUS_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTA 0x185e
+#define regDPG0_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTR 0x185f
+#define regDPG0_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTG 0x1860
+#define regDPG0_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTB 0x1861
+#define regDPG0_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTC 0x1862
+#define regDPG0_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define regFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define regFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define regFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define regFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_CONTROL 0x1840
+#define regFMT0_FMT_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define regFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define regFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define regFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define regFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_CNTL 0x1845
+#define regFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_422_CONTROL 0x1849
+#define regFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define regOPPBUF0_OPPBUF_CONTROL 0x1884
+#define regOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_CONTROL1 0x1889
+#define regOPPBUF0_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define regOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define regOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define regDPG1_DPG_CONTROL 0x18ae
+#define regDPG1_DPG_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_RAMP_CONTROL 0x18af
+#define regDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_DIMENSIONS 0x18b0
+#define regDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_R_CR 0x18b1
+#define regDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_G_Y 0x18b2
+#define regDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_B_CB 0x18b3
+#define regDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define regDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG1_DPG_STATUS 0x18b5
+#define regDPG1_DPG_STATUS_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTA 0x18b8
+#define regDPG1_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTR 0x18b9
+#define regDPG1_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTG 0x18ba
+#define regDPG1_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTB 0x18bb
+#define regDPG1_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTC 0x18bc
+#define regDPG1_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define regFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define regFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define regFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define regFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_CONTROL 0x189a
+#define regFMT1_FMT_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define regFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define regFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define regFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define regFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_CNTL 0x189f
+#define regFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_422_CONTROL 0x18a3
+#define regFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define regOPPBUF1_OPPBUF_CONTROL 0x18de
+#define regOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_CONTROL1 0x18e3
+#define regOPPBUF1_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define regOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define regOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+// base address: 0x2d0
+#define regDPG2_DPG_CONTROL 0x1908
+#define regDPG2_DPG_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_RAMP_CONTROL 0x1909
+#define regDPG2_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_DIMENSIONS 0x190a
+#define regDPG2_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_R_CR 0x190b
+#define regDPG2_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_G_Y 0x190c
+#define regDPG2_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_B_CB 0x190d
+#define regDPG2_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG2_DPG_OFFSET_SEGMENT 0x190e
+#define regDPG2_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG2_DPG_STATUS 0x190f
+#define regDPG2_DPG_STATUS_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTA 0x1912
+#define regDPG2_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTR 0x1913
+#define regDPG2_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTG 0x1914
+#define regDPG2_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTB 0x1915
+#define regDPG2_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTC 0x1916
+#define regDPG2_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+// base address: 0x2d0
+#define regFMT2_FMT_CLAMP_COMPONENT_R 0x18f0
+#define regFMT2_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_G 0x18f1
+#define regFMT2_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_B 0x18f2
+#define regFMT2_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL 0x18f3
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_CONTROL 0x18f4
+#define regFMT2_FMT_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_BIT_DEPTH_CONTROL 0x18f5
+#define regFMT2_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_R_SEED 0x18f6
+#define regFMT2_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_G_SEED 0x18f7
+#define regFMT2_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_B_SEED 0x18f8
+#define regFMT2_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_CNTL 0x18f9
+#define regFMT2_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18fa
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL 0x18fb
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_422_CONTROL 0x18fd
+#define regFMT2_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+// base address: 0x2d0
+#define regOPPBUF2_OPPBUF_CONTROL 0x1938
+#define regOPPBUF2_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0 0x1939
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1 0x193a
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_CONTROL1 0x193d
+#define regOPPBUF2_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE2_OPP_PIPE_CONTROL 0x1940
+#define regOPP_PIPE2_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL 0x1945
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK 0x1946
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+// base address: 0x438
+#define regDPG3_DPG_CONTROL 0x1962
+#define regDPG3_DPG_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_RAMP_CONTROL 0x1963
+#define regDPG3_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_DIMENSIONS 0x1964
+#define regDPG3_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_R_CR 0x1965
+#define regDPG3_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_G_Y 0x1966
+#define regDPG3_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_B_CB 0x1967
+#define regDPG3_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG3_DPG_OFFSET_SEGMENT 0x1968
+#define regDPG3_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG3_DPG_STATUS 0x1969
+#define regDPG3_DPG_STATUS_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTA 0x196c
+#define regDPG3_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTR 0x196d
+#define regDPG3_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTG 0x196e
+#define regDPG3_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTB 0x196f
+#define regDPG3_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTC 0x1970
+#define regDPG3_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+// base address: 0x438
+#define regFMT3_FMT_CLAMP_COMPONENT_R 0x194a
+#define regFMT3_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_G 0x194b
+#define regFMT3_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_B 0x194c
+#define regFMT3_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL 0x194d
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_CONTROL 0x194e
+#define regFMT3_FMT_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_BIT_DEPTH_CONTROL 0x194f
+#define regFMT3_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_R_SEED 0x1950
+#define regFMT3_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_G_SEED 0x1951
+#define regFMT3_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_B_SEED 0x1952
+#define regFMT3_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_CNTL 0x1953
+#define regFMT3_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1954
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL 0x1955
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_422_CONTROL 0x1957
+#define regFMT3_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+// base address: 0x438
+#define regOPPBUF3_OPPBUF_CONTROL 0x1992
+#define regOPPBUF3_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0 0x1993
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1 0x1994
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_CONTROL1 0x1997
+#define regOPPBUF3_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+// base address: 0x438
+#define regOPP_PIPE3_OPP_PIPE_CONTROL 0x199a
+#define regOPP_PIPE3_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+// base address: 0x438
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL 0x199f
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK 0x19a0
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+// base address: 0x0
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG 0x1a64
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+// base address: 0x4
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG 0x1a65
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+// base address: 0x8
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG 0x1a66
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+// base address: 0xc
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG 0x1a67
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define regOPP_TOP_CLK_CONTROL 0x1a5e
+#define regOPP_TOP_CLK_CONTROL_BASE_IDX 2
+#define regOPP_ABM_CONTROL 0x1a60
+#define regOPP_ABM_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x6af8
+#define regDC_PERFMON16_PERFCOUNTER_CNTL 0x1abe
+#define regDC_PERFMON16_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2 0x1abf
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_STATE 0x1ac0
+#define regDC_PERFMON16_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL 0x1ac1
+#define regDC_PERFMON16_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL2 0x1ac2
+#define regDC_PERFMON16_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC 0x1ac3
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW 0x1ac4
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_HI 0x1ac5
+#define regDC_PERFMON16_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_LOW 0x1ac6
+#define regDC_PERFMON16_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define regODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define regODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define regODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define regODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_MEMORY_CONFIG 0x1ad0
+#define regODM0_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM0_OPTC_INPUT_SPARE_REGISTER 0x1ad1
+#define regODM0_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM0_OPTC_UNDERFLOW_THRESHOLD 0x1ad5
+#define regODM0_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define regODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define regODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define regODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define regODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_MEMORY_CONFIG 0x1ae0
+#define regODM1_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM1_OPTC_INPUT_SPARE_REGISTER 0x1ae1
+#define regODM1_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM1_OPTC_UNDERFLOW_THRESHOLD 0x1ae5
+#define regODM1_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+// base address: 0x80
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_DATA_SOURCE_SELECT 0x1aeb
+#define regODM2_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM2_OPTC_DATA_FORMAT_CONTROL 0x1aec
+#define regODM2_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_BYTES_PER_PIXEL 0x1aed
+#define regODM2_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM2_OPTC_WIDTH_CONTROL 0x1aee
+#define regODM2_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL 0x1aef
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_MEMORY_CONFIG 0x1af0
+#define regODM2_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM2_OPTC_INPUT_SPARE_REGISTER 0x1af1
+#define regODM2_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM2_OPTC_UNDERFLOW_THRESHOLD 0x1af5
+#define regODM2_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+// base address: 0xc0
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_DATA_SOURCE_SELECT 0x1afb
+#define regODM3_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM3_OPTC_DATA_FORMAT_CONTROL 0x1afc
+#define regODM3_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_BYTES_PER_PIXEL 0x1afd
+#define regODM3_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM3_OPTC_WIDTH_CONTROL 0x1afe
+#define regODM3_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL 0x1aff
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_MEMORY_CONFIG 0x1b00
+#define regODM3_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM3_OPTC_INPUT_SPARE_REGISTER 0x1b01
+#define regODM3_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM3_OPTC_UNDERFLOW_THRESHOLD 0x1b05
+#define regODM3_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define regOTG0_OTG_H_TOTAL 0x1b2a
+#define regOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define regOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A 0x1b2c
+#define regOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define regOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define regOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL 0x1b2f
+#define regOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define regOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define regOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MID 0x1b32
+#define regOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define regOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL 0x1b34
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2 0x1b35
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_INT_STATUS 0x1b36
+#define regOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b37
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_V_BLANK_START_END 0x1b38
+#define regOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A 0x1b39
+#define regOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A_CNTL 0x1b3a
+#define regOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_CNTL 0x1b3b
+#define regOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3c
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_CNTL 0x1b3d
+#define regOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3e
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3f
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b41
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG0_OTG_CONTROL 0x1b43
+#define regOTG0_OTG_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DLPC_CONTROL 0x1b44
+#define regOTG0_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_CONTROL 0x1b45
+#define regOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_STATUS 0x1b46
+#define regOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define regOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define regOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG0_OTG_STATUS 0x1b49
+#define regOTG0_OTG_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STATUS_POSITION 0x1b4a
+#define regOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG0_OTG_LONG_VBLANK_STATUS 0x1b4b
+#define regOTG0_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG0_OTG_NOM_VERT_POSITION 0x1b4c
+#define regOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_STATUS_FRAME_COUNT 0x1b4d
+#define regOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_VF_COUNT 0x1b4e
+#define regOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_HV_COUNT 0x1b4f
+#define regOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG0_OTG_COUNT_CONTROL 0x1b50
+#define regOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_COUNT_RESET 0x1b51
+#define regOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b52
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG0_OTG_VERT_SYNC_CONTROL 0x1b53
+#define regOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_STATUS 0x1b54
+#define regOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STEREO_CONTROL 0x1b55
+#define regOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_STATUS 0x1b56
+#define regOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_CONTROL 0x1b57
+#define regOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_POSITION 0x1b58
+#define regOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_FRAME 0x1b59
+#define regOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG0_OTG_INTERRUPT_CONTROL 0x1b5a
+#define regOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_UPDATE_LOCK 0x1b5b
+#define regOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5c
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_MASTER_EN 0x1b5d
+#define regOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b5f
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b60
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b61
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b62
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b63
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b64
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC_CNTL 0x1b65
+#define regOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b66
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b67
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b68
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b69
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_RG 0x1b6a
+#define regOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_B 0x1b6b
+#define regOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b6c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b6d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b6e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b6f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_RG 0x1b70
+#define regOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_B 0x1b71
+#define regOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_RG 0x1b72
+#define regOTG0_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_B 0x1b73
+#define regOTG0_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_RG 0x1b74
+#define regOTG0_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_B 0x1b75
+#define regOTG0_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b76
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b77
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1b78
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1b79
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1b7a
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1b7b
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1b7c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1b7d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1b7e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1b7f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b80
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b81
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_VSYNC_GAP 0x1b82
+#define regOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_MODE 0x1b83
+#define regOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG0_OTG_CLOCK_CONTROL 0x1b84
+#define regOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VSTARTUP_PARAM 0x1b85
+#define regOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_PARAM 0x1b86
+#define regOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VREADY_PARAM 0x1b87
+#define regOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b88
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_LOCK 0x1b89
+#define regOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_GSL_CONTROL 0x1b8a
+#define regOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_X 0x1b8b
+#define regOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_Y 0x1b8c
+#define regOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_KEEPOUT 0x1b8d
+#define regOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL0 0x1b8e
+#define regOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL1 0x1b8f
+#define regOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL2 0x1b90
+#define regOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL3 0x1b91
+#define regOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL4 0x1b92
+#define regOTG0_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b93
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS 0x1b95
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE 0x1b96
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE 0x1b97
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW 0x1b98
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTROL 0x1b99
+#define regOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTOL2 0x1b9a
+#define regOTG0_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO0 0x1b9b
+#define regOTG0_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO1 0x1b9c
+#define regOTG0_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG0_OTG_REQUEST_CONTROL 0x1b9d
+#define regOTG0_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DSC_START_POSITION 0x1b9e
+#define regOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG0_OTG_PIPE_UPDATE_STATUS 0x1b9f
+#define regOTG0_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SPARE_REGISTER 0x1ba0
+#define regOTG0_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define regOTG1_OTG_H_TOTAL 0x1baa
+#define regOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_H_BLANK_START_END 0x1bab
+#define regOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A 0x1bac
+#define regOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define regOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define regOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL 0x1baf
+#define regOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define regOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define regOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define regOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define regOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL 0x1bb4
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2 0x1bb5
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb6
+#define regOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb7
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_V_BLANK_START_END 0x1bb8
+#define regOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A 0x1bb9
+#define regOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A_CNTL 0x1bba
+#define regOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_CNTL 0x1bbb
+#define regOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bbc
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_CNTL 0x1bbd
+#define regOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbe
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbf
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bc1
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG1_OTG_CONTROL 0x1bc3
+#define regOTG1_OTG_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DLPC_CONTROL 0x1bc4
+#define regOTG1_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_CONTROL 0x1bc5
+#define regOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_STATUS 0x1bc6
+#define regOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define regOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define regOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG1_OTG_STATUS 0x1bc9
+#define regOTG1_OTG_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STATUS_POSITION 0x1bca
+#define regOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG1_OTG_LONG_VBLANK_STATUS 0x1bcb
+#define regOTG1_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG1_OTG_NOM_VERT_POSITION 0x1bcc
+#define regOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_STATUS_FRAME_COUNT 0x1bcd
+#define regOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_VF_COUNT 0x1bce
+#define regOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_HV_COUNT 0x1bcf
+#define regOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG1_OTG_COUNT_CONTROL 0x1bd0
+#define regOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_COUNT_RESET 0x1bd1
+#define regOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG1_OTG_VERT_SYNC_CONTROL 0x1bd3
+#define regOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_STATUS 0x1bd4
+#define regOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STEREO_CONTROL 0x1bd5
+#define regOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_STATUS 0x1bd6
+#define regOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_CONTROL 0x1bd7
+#define regOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_POSITION 0x1bd8
+#define regOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_FRAME 0x1bd9
+#define regOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG1_OTG_INTERRUPT_CONTROL 0x1bda
+#define regOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_UPDATE_LOCK 0x1bdb
+#define regOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdc
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_MASTER_EN 0x1bdd
+#define regOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1bdf
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be0
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be1
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be3
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be4
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC_CNTL 0x1be5
+#define regOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1be6
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1be7
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1be8
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1be9
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_RG 0x1bea
+#define regOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_B 0x1beb
+#define regOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bec
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bed
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bee
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bef
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_RG 0x1bf0
+#define regOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_B 0x1bf1
+#define regOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_RG 0x1bf2
+#define regOTG1_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_B 0x1bf3
+#define regOTG1_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_RG 0x1bf4
+#define regOTG1_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_B 0x1bf5
+#define regOTG1_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bf6
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bf7
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1bf8
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1bf9
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1bfa
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1bfb
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1bfc
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1bfd
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1bfe
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1bff
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c00
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c01
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_VSYNC_GAP 0x1c02
+#define regOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_MODE 0x1c03
+#define regOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG1_OTG_CLOCK_CONTROL 0x1c04
+#define regOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VSTARTUP_PARAM 0x1c05
+#define regOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_PARAM 0x1c06
+#define regOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VREADY_PARAM 0x1c07
+#define regOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c08
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_LOCK 0x1c09
+#define regOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_GSL_CONTROL 0x1c0a
+#define regOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_X 0x1c0b
+#define regOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_Y 0x1c0c
+#define regOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_KEEPOUT 0x1c0d
+#define regOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL0 0x1c0e
+#define regOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL1 0x1c0f
+#define regOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL2 0x1c10
+#define regOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL3 0x1c11
+#define regOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL4 0x1c12
+#define regOTG1_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c13
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS 0x1c15
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c16
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE 0x1c17
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW 0x1c18
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTROL 0x1c19
+#define regOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTOL2 0x1c1a
+#define regOTG1_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO0 0x1c1b
+#define regOTG1_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO1 0x1c1c
+#define regOTG1_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG1_OTG_REQUEST_CONTROL 0x1c1d
+#define regOTG1_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DSC_START_POSITION 0x1c1e
+#define regOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG1_OTG_PIPE_UPDATE_STATUS 0x1c1f
+#define regOTG1_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SPARE_REGISTER 0x1c20
+#define regOTG1_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+// base address: 0x400
+#define regOTG2_OTG_H_TOTAL 0x1c2a
+#define regOTG2_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_H_BLANK_START_END 0x1c2b
+#define regOTG2_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A 0x1c2c
+#define regOTG2_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A_CNTL 0x1c2d
+#define regOTG2_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_H_TIMING_CNTL 0x1c2e
+#define regOTG2_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL 0x1c2f
+#define regOTG2_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MIN 0x1c30
+#define regOTG2_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MAX 0x1c31
+#define regOTG2_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MID 0x1c32
+#define regOTG2_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_CONTROL 0x1c33
+#define regOTG2_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL 0x1c34
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2 0x1c35
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_INT_STATUS 0x1c36
+#define regOTG2_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS 0x1c37
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_V_BLANK_START_END 0x1c38
+#define regOTG2_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A 0x1c39
+#define regOTG2_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A_CNTL 0x1c3a
+#define regOTG2_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_CNTL 0x1c3b
+#define regOTG2_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG 0x1c3c
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_CNTL 0x1c3d
+#define regOTG2_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG 0x1c3e
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL 0x1c3f
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE 0x1c41
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG2_OTG_CONTROL 0x1c43
+#define regOTG2_OTG_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DLPC_CONTROL 0x1c44
+#define regOTG2_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_CONTROL 0x1c45
+#define regOTG2_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_STATUS 0x1c46
+#define regOTG2_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK0 0x1c47
+#define regOTG2_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK1 0x1c48
+#define regOTG2_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG2_OTG_STATUS 0x1c49
+#define regOTG2_OTG_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STATUS_POSITION 0x1c4a
+#define regOTG2_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG2_OTG_LONG_VBLANK_STATUS 0x1c4b
+#define regOTG2_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG2_OTG_NOM_VERT_POSITION 0x1c4c
+#define regOTG2_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_STATUS_FRAME_COUNT 0x1c4d
+#define regOTG2_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_VF_COUNT 0x1c4e
+#define regOTG2_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_HV_COUNT 0x1c4f
+#define regOTG2_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG2_OTG_COUNT_CONTROL 0x1c50
+#define regOTG2_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_COUNT_RESET 0x1c51
+#define regOTG2_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1c52
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG2_OTG_VERT_SYNC_CONTROL 0x1c53
+#define regOTG2_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_STATUS 0x1c54
+#define regOTG2_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STEREO_CONTROL 0x1c55
+#define regOTG2_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_STATUS 0x1c56
+#define regOTG2_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_CONTROL 0x1c57
+#define regOTG2_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_POSITION 0x1c58
+#define regOTG2_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_FRAME 0x1c59
+#define regOTG2_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG2_OTG_INTERRUPT_CONTROL 0x1c5a
+#define regOTG2_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_UPDATE_LOCK 0x1c5b
+#define regOTG2_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL 0x1c5c
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_MASTER_EN 0x1c5d
+#define regOTG2_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION 0x1c5f
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1c60
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION 0x1c61
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1c62
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION 0x1c63
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1c64
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC_CNTL 0x1c65
+#define regOTG2_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL 0x1c66
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL 0x1c67
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL 0x1c68
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL 0x1c69
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_RG 0x1c6a
+#define regOTG2_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_B 0x1c6b
+#define regOTG2_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL 0x1c6c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL 0x1c6d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL 0x1c6e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL 0x1c6f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_RG 0x1c70
+#define regOTG2_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_B 0x1c71
+#define regOTG2_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_RG 0x1c72
+#define regOTG2_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_B 0x1c73
+#define regOTG2_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_RG 0x1c74
+#define regOTG2_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_B 0x1c75
+#define regOTG2_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK 0x1c76
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1c77
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1c78
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1c79
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1c7a
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1c7b
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1c7c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1c7d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1c7e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1c7f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL 0x1c80
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL 0x1c81
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_VSYNC_GAP 0x1c82
+#define regOTG2_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_MODE 0x1c83
+#define regOTG2_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG2_OTG_CLOCK_CONTROL 0x1c84
+#define regOTG2_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VSTARTUP_PARAM 0x1c85
+#define regOTG2_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_PARAM 0x1c86
+#define regOTG2_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VREADY_PARAM 0x1c87
+#define regOTG2_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS 0x1c88
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_LOCK 0x1c89
+#define regOTG2_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_GSL_CONTROL 0x1c8a
+#define regOTG2_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_X 0x1c8b
+#define regOTG2_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_Y 0x1c8c
+#define regOTG2_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_KEEPOUT 0x1c8d
+#define regOTG2_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL0 0x1c8e
+#define regOTG2_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL1 0x1c8f
+#define regOTG2_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL2 0x1c90
+#define regOTG2_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL3 0x1c91
+#define regOTG2_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL4 0x1c92
+#define regOTG2_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL 0x1c93
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS 0x1c95
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c96
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE 0x1c97
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW 0x1c98
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTROL 0x1c99
+#define regOTG2_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTOL2 0x1c9a
+#define regOTG2_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO0 0x1c9b
+#define regOTG2_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO1 0x1c9c
+#define regOTG2_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG2_OTG_REQUEST_CONTROL 0x1c9d
+#define regOTG2_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DSC_START_POSITION 0x1c9e
+#define regOTG2_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG2_OTG_PIPE_UPDATE_STATUS 0x1c9f
+#define regOTG2_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SPARE_REGISTER 0x1ca0
+#define regOTG2_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+// base address: 0x600
+#define regOTG3_OTG_H_TOTAL 0x1caa
+#define regOTG3_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_H_BLANK_START_END 0x1cab
+#define regOTG3_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A 0x1cac
+#define regOTG3_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A_CNTL 0x1cad
+#define regOTG3_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_H_TIMING_CNTL 0x1cae
+#define regOTG3_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL 0x1caf
+#define regOTG3_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MIN 0x1cb0
+#define regOTG3_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MAX 0x1cb1
+#define regOTG3_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MID 0x1cb2
+#define regOTG3_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_CONTROL 0x1cb3
+#define regOTG3_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL 0x1cb4
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2 0x1cb5
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_INT_STATUS 0x1cb6
+#define regOTG3_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS 0x1cb7
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_V_BLANK_START_END 0x1cb8
+#define regOTG3_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A 0x1cb9
+#define regOTG3_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A_CNTL 0x1cba
+#define regOTG3_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_CNTL 0x1cbb
+#define regOTG3_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG 0x1cbc
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_CNTL 0x1cbd
+#define regOTG3_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG 0x1cbe
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL 0x1cbf
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE 0x1cc1
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG3_OTG_CONTROL 0x1cc3
+#define regOTG3_OTG_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DLPC_CONTROL 0x1cc4
+#define regOTG3_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_CONTROL 0x1cc5
+#define regOTG3_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_STATUS 0x1cc6
+#define regOTG3_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK0 0x1cc7
+#define regOTG3_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK1 0x1cc8
+#define regOTG3_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG3_OTG_STATUS 0x1cc9
+#define regOTG3_OTG_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STATUS_POSITION 0x1cca
+#define regOTG3_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG3_OTG_LONG_VBLANK_STATUS 0x1ccb
+#define regOTG3_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG3_OTG_NOM_VERT_POSITION 0x1ccc
+#define regOTG3_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_STATUS_FRAME_COUNT 0x1ccd
+#define regOTG3_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_VF_COUNT 0x1cce
+#define regOTG3_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_HV_COUNT 0x1ccf
+#define regOTG3_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG3_OTG_COUNT_CONTROL 0x1cd0
+#define regOTG3_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_COUNT_RESET 0x1cd1
+#define regOTG3_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1cd2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG3_OTG_VERT_SYNC_CONTROL 0x1cd3
+#define regOTG3_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_STATUS 0x1cd4
+#define regOTG3_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STEREO_CONTROL 0x1cd5
+#define regOTG3_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_STATUS 0x1cd6
+#define regOTG3_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_CONTROL 0x1cd7
+#define regOTG3_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_POSITION 0x1cd8
+#define regOTG3_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_FRAME 0x1cd9
+#define regOTG3_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG3_OTG_INTERRUPT_CONTROL 0x1cda
+#define regOTG3_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_UPDATE_LOCK 0x1cdb
+#define regOTG3_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL 0x1cdc
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_MASTER_EN 0x1cdd
+#define regOTG3_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION 0x1cdf
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1ce0
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION 0x1ce1
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1ce2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION 0x1ce3
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1ce4
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC_CNTL 0x1ce5
+#define regOTG3_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL 0x1ce6
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL 0x1ce7
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL 0x1ce8
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL 0x1ce9
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_RG 0x1cea
+#define regOTG3_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_B 0x1ceb
+#define regOTG3_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL 0x1cec
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL 0x1ced
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL 0x1cee
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL 0x1cef
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_RG 0x1cf0
+#define regOTG3_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_B 0x1cf1
+#define regOTG3_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_RG 0x1cf2
+#define regOTG3_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_B 0x1cf3
+#define regOTG3_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_RG 0x1cf4
+#define regOTG3_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_B 0x1cf5
+#define regOTG3_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK 0x1cf6
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1cf7
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1cf8
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1cf9
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1cfa
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1cfb
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1cfc
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1cfd
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1cfe
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1cff
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL 0x1d00
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL 0x1d01
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_VSYNC_GAP 0x1d02
+#define regOTG3_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_MODE 0x1d03
+#define regOTG3_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG3_OTG_CLOCK_CONTROL 0x1d04
+#define regOTG3_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VSTARTUP_PARAM 0x1d05
+#define regOTG3_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_PARAM 0x1d06
+#define regOTG3_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VREADY_PARAM 0x1d07
+#define regOTG3_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS 0x1d08
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_LOCK 0x1d09
+#define regOTG3_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_GSL_CONTROL 0x1d0a
+#define regOTG3_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_X 0x1d0b
+#define regOTG3_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_Y 0x1d0c
+#define regOTG3_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_KEEPOUT 0x1d0d
+#define regOTG3_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL0 0x1d0e
+#define regOTG3_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL1 0x1d0f
+#define regOTG3_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL2 0x1d10
+#define regOTG3_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL3 0x1d11
+#define regOTG3_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL4 0x1d12
+#define regOTG3_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL 0x1d13
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS 0x1d15
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE 0x1d16
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE 0x1d17
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW 0x1d18
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTROL 0x1d19
+#define regOTG3_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTOL2 0x1d1a
+#define regOTG3_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO0 0x1d1b
+#define regOTG3_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO1 0x1d1c
+#define regOTG3_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG3_OTG_REQUEST_CONTROL 0x1d1d
+#define regOTG3_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DSC_START_POSITION 0x1d1e
+#define regOTG3_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG3_OTG_PIPE_UPDATE_STATUS 0x1d1f
+#define regOTG3_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SPARE_REGISTER 0x1d20
+#define regOTG3_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc320_dispdec
+// base address: 0x0
+#define regOTG_CRC320_OTG_CRC0_DATA_R32 0x1dd2
+#define regOTG_CRC320_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_G32 0x1dd3
+#define regOTG_CRC320_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_B32 0x1dd4
+#define regOTG_CRC320_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_C32 0x1dd5
+#define regOTG_CRC320_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_AES 0x1dd6
+#define regOTG_CRC320_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_R32 0x1dd7
+#define regOTG_CRC320_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_G32 0x1dd8
+#define regOTG_CRC320_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_B32 0x1dd9
+#define regOTG_CRC320_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_C32 0x1dda
+#define regOTG_CRC320_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_R32 0x1ddb
+#define regOTG_CRC320_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_G32 0x1ddc
+#define regOTG_CRC320_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_B32 0x1ddd
+#define regOTG_CRC320_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_C32 0x1dde
+#define regOTG_CRC320_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_R32 0x1ddf
+#define regOTG_CRC320_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_G32 0x1de0
+#define regOTG_CRC320_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_B32 0x1de1
+#define regOTG_CRC320_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_C32 0x1de2
+#define regOTG_CRC320_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc321_dispdec
+// base address: 0x50
+#define regOTG_CRC321_OTG_CRC0_DATA_R32 0x1de6
+#define regOTG_CRC321_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_G32 0x1de7
+#define regOTG_CRC321_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_B32 0x1de8
+#define regOTG_CRC321_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_C32 0x1de9
+#define regOTG_CRC321_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_AES 0x1dea
+#define regOTG_CRC321_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_R32 0x1deb
+#define regOTG_CRC321_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_G32 0x1dec
+#define regOTG_CRC321_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_B32 0x1ded
+#define regOTG_CRC321_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_C32 0x1dee
+#define regOTG_CRC321_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_R32 0x1def
+#define regOTG_CRC321_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_G32 0x1df0
+#define regOTG_CRC321_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_B32 0x1df1
+#define regOTG_CRC321_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_C32 0x1df2
+#define regOTG_CRC321_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_R32 0x1df3
+#define regOTG_CRC321_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_G32 0x1df4
+#define regOTG_CRC321_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_B32 0x1df5
+#define regOTG_CRC321_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_C32 0x1df6
+#define regOTG_CRC321_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc322_dispdec
+// base address: 0xa0
+#define regOTG_CRC322_OTG_CRC0_DATA_R32 0x1dfa
+#define regOTG_CRC322_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_G32 0x1dfb
+#define regOTG_CRC322_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_B32 0x1dfc
+#define regOTG_CRC322_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_C32 0x1dfd
+#define regOTG_CRC322_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_AES 0x1dfe
+#define regOTG_CRC322_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_R32 0x1dff
+#define regOTG_CRC322_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_G32 0x1e00
+#define regOTG_CRC322_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_B32 0x1e01
+#define regOTG_CRC322_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_C32 0x1e02
+#define regOTG_CRC322_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_R32 0x1e03
+#define regOTG_CRC322_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_G32 0x1e04
+#define regOTG_CRC322_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_B32 0x1e05
+#define regOTG_CRC322_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_C32 0x1e06
+#define regOTG_CRC322_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_R32 0x1e07
+#define regOTG_CRC322_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_G32 0x1e08
+#define regOTG_CRC322_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_B32 0x1e09
+#define regOTG_CRC322_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_C32 0x1e0a
+#define regOTG_CRC322_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc323_dispdec
+// base address: 0xf0
+#define regOTG_CRC323_OTG_CRC0_DATA_R32 0x1e0e
+#define regOTG_CRC323_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_G32 0x1e0f
+#define regOTG_CRC323_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_B32 0x1e10
+#define regOTG_CRC323_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_C32 0x1e11
+#define regOTG_CRC323_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_AES 0x1e12
+#define regOTG_CRC323_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_R32 0x1e13
+#define regOTG_CRC323_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_G32 0x1e14
+#define regOTG_CRC323_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_B32 0x1e15
+#define regOTG_CRC323_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_C32 0x1e16
+#define regOTG_CRC323_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_R32 0x1e17
+#define regOTG_CRC323_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_G32 0x1e18
+#define regOTG_CRC323_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_B32 0x1e19
+#define regOTG_CRC323_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_C32 0x1e1a
+#define regOTG_CRC323_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_R32 0x1e1b
+#define regOTG_CRC323_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_G32 0x1e1c
+#define regOTG_CRC323_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_B32 0x1e1d
+#define regOTG_CRC323_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_C32 0x1e1e
+#define regOTG_CRC323_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define regGSL_SOURCE_SELECT 0x1e2b
+#define regGSL_SOURCE_SELECT_BASE_IDX 2
+#define regOPTC_DLPC_CONTROL 0x1e2c
+#define regOPTC_DLPC_CONTROL_BASE_IDX 2
+#define regOPTC_CLOCK_CONTROL 0x1e2d
+#define regOPTC_CLOCK_CONTROL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL 0x1e2e
+#define regODM_MEM_PWR_CTRL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL3 0x1e30
+#define regODM_MEM_PWR_CTRL3_BASE_IDX 2
+#define regODM_MEM_PWR_STATUS 0x1e31
+#define regODM_MEM_PWR_STATUS_BASE_IDX 2
+#define regOPTC_MISC_SPARE_REGISTER 0x1e32
+#define regOPTC_MISC_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x79a8
+#define regDC_PERFMON17_PERFCOUNTER_CNTL 0x1e6a
+#define regDC_PERFMON17_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2 0x1e6b
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_STATE 0x1e6c
+#define regDC_PERFMON17_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL 0x1e6d
+#define regDC_PERFMON17_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL2 0x1e6e
+#define regDC_PERFMON17_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC 0x1e6f
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW 0x1e70
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_HI 0x1e71
+#define regDC_PERFMON17_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_LOW 0x1e72
+#define regDC_PERFMON17_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define regHPD0_DC_HPD_INT_STATUS 0x1f14
+#define regHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define regHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_CONTROL 0x1f16
+#define regHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define regHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define regHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define regHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_CONTROL 0x1f1e
+#define regHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+// base address: 0x40
+#define regHPD2_DC_HPD_INT_STATUS 0x1f24
+#define regHPD2_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD2_DC_HPD_INT_CONTROL 0x1f25
+#define regHPD2_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_CONTROL 0x1f26
+#define regHPD2_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL 0x1f27
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL 0x1f28
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+// base address: 0x60
+#define regHPD3_DC_HPD_INT_STATUS 0x1f2c
+#define regHPD3_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD3_DC_HPD_INT_CONTROL 0x1f2d
+#define regHPD3_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_CONTROL 0x1f2e
+#define regHPD3_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL 0x1f2f
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL 0x1f30
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+// base address: 0x80
+#define regHPD4_DC_HPD_INT_STATUS 0x1f34
+#define regHPD4_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD4_DC_HPD_INT_CONTROL 0x1f35
+#define regHPD4_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_CONTROL 0x1f36
+#define regHPD4_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL 0x1f37
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL 0x1f38
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define regDP0_DP_LINK_CNTL 0x211e
+#define regDP0_DP_LINK_CNTL_BASE_IDX 2
+#define regDP0_DP_PIXEL_FORMAT 0x211f
+#define regDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP0_DP_MSA_COLORIMETRY 0x2120
+#define regDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP0_DP_CONFIG 0x2121
+#define regDP0_DP_CONFIG_BASE_IDX 2
+#define regDP0_DP_VID_STREAM_CNTL 0x2122
+#define regDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP0_DP_STEER_FIFO 0x2123
+#define regDP0_DP_STEER_FIFO_BASE_IDX 2
+#define regDP0_DP_MSA_MISC 0x2124
+#define regDP0_DP_MSA_MISC_BASE_IDX 2
+#define regDP0_DP_DPHY_INTERNAL_CTRL 0x2125
+#define regDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP0_DP_VID_TIMING 0x2126
+#define regDP0_DP_VID_TIMING_BASE_IDX 2
+#define regDP0_DP_VID_N 0x2127
+#define regDP0_DP_VID_N_BASE_IDX 2
+#define regDP0_DP_VID_M 0x2128
+#define regDP0_DP_VID_M_BASE_IDX 2
+#define regDP0_DP_LINK_FRAMING_CNTL 0x2129
+#define regDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP0_DP_HBR2_EYE_PATTERN 0x212a
+#define regDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP0_DP_VID_MSA_VBID 0x212b
+#define regDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP0_DP_VID_INTERRUPT_CNTL 0x212c
+#define regDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CNTL 0x212d
+#define regDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x212e
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM0 0x212f
+#define regDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM1 0x2130
+#define regDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM2 0x2131
+#define regDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP0_DP_DPHY_8B10B_CNTL 0x2132
+#define regDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_PRBS_CNTL 0x2133
+#define regDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_SCRAM_CNTL 0x2134
+#define regDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_CONTROL0 0x2135
+#define regDP0_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_CONTROL1 0x2136
+#define regDP0_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT0 0x2137
+#define regDP0_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT1 0x2138
+#define regDP0_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_STATUS 0x2139
+#define regDP0_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING 0x213a
+#define regDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS 0x213b
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT2 0x213c
+#define regDP0_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT3 0x213d
+#define regDP0_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL 0x2141
+#define regDP0_DP_SEC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL1 0x2142
+#define regDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING1 0x2143
+#define regDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING2 0x2144
+#define regDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING3 0x2145
+#define regDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING4 0x2146
+#define regDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N 0x2147
+#define regDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N_READBACK 0x2148
+#define regDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M 0x2149
+#define regDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M_READBACK 0x214a
+#define regDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_TIMESTAMP 0x214b
+#define regDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP0_DP_SEC_PACKET_CNTL 0x214c
+#define regDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_CNTL 0x214d
+#define regDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_UPDATE 0x214f
+#define regDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0 0x2150
+#define regDP0_DP_MSE_SAT0_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1 0x2151
+#define regDP0_DP_MSE_SAT1_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2 0x2152
+#define regDP0_DP_MSE_SAT2_BASE_IDX 2
+#define regDP0_DP_MSE_SAT_UPDATE 0x2153
+#define regDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_LINK_TIMING 0x2154
+#define regDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP0_DP_MSE_MISC_CNTL 0x2155
+#define regDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x215a
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x215b
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0_STATUS 0x215d
+#define regDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1_STATUS 0x215e
+#define regDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2_STATUS 0x215f
+#define regDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP0_DP_DPIA_SPARE 0x2160
+#define regDP0_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM1 0x2162
+#define regDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM2 0x2163
+#define regDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM3 0x2164
+#define regDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM4 0x2165
+#define regDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL 0x2166
+#define regDP0_DP_MSO_CNTL_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL1 0x2167
+#define regDP0_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP0_DP_DSC_CNTL 0x2168
+#define regDP0_DP_DSC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL2 0x2169
+#define regDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL3 0x216a
+#define regDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL4 0x216b
+#define regDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL5 0x216c
+#define regDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL6 0x216d
+#define regDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL7 0x216e
+#define regDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP0_DP_DB_CNTL 0x216f
+#define regDP0_DP_DB_CNTL_BASE_IDX 2
+#define regDP0_DP_MSA_VBID_MISC 0x2170
+#define regDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP0_DP_SEC_METADATA_TRANSMISSION 0x2171
+#define regDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP0_DP_ALPM_CNTL 0x2173
+#define regDP0_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP8_CNTL 0x2174
+#define regDP0_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP9_CNTL 0x2175
+#define regDP0_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP10_CNTL 0x2176
+#define regDP0_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP11_CNTL 0x2177
+#define regDP0_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP_EN_DB_STATUS 0x2178
+#define regDP0_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL1 0x2179
+#define regDP0_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL2 0x217a
+#define regDP0_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL3 0x217b
+#define regDP0_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL4 0x217c
+#define regDP0_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL5 0x217d
+#define regDP0_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS 0x217e
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL 0x217f
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0 0x2180
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1 0x2181
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL 0x2182
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define regDIG0_DIG_FE_CNTL 0x2093
+#define regDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_CLK_CNTL 0x2094
+#define regDIG0_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_EN_CNTL 0x2095
+#define regDIG0_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_CNTL 0x2096
+#define regDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_RESULT 0x2097
+#define regDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG0_DIG_CLOCK_PATTERN 0x2098
+#define regDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_TEST_PATTERN 0x2099
+#define regDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_RANDOM_PATTERN_SEED 0x209a
+#define regDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL0 0x209b
+#define regDIG0_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL1 0x209c
+#define regDIG0_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL 0x209d
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_CONTROL 0x209e
+#define regDIG0_HDMI_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_STATUS 0x209f
+#define regDIG0_HDMI_STATUS_BASE_IDX 2
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL 0x20a0
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_PACKET_CONTROL 0x20a1
+#define regDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL 0x20a2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL0 0x20a3
+#define regDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL1 0x20a4
+#define regDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x20a5
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6 0x20a6
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20a7
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG0_HDMI_GC 0x20a8
+#define regDIG0_HDMI_GC_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x20a9
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x20aa
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x20ab
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x20ac
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7 0x20ad
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8 0x20ae
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9 0x20af
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10 0x20b0
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG0_HDMI_DB_CONTROL 0x20b1
+#define regDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_0 0x20b2
+#define regDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_1 0x20b3
+#define regDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_0 0x20b4
+#define regDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_1 0x20b5
+#define regDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_0 0x20b6
+#define regDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_1 0x20b7
+#define regDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_0 0x20b8
+#define regDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_1 0x20b9
+#define regDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG0_AFMT_CNTL 0x20ba
+#define regDIG0_AFMT_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CLK_CNTL 0x20bb
+#define regDIG0_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CNTL 0x20bc
+#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_EN_CNTL 0x20bd
+#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CNTL 0x20e4
+#define regDIG0_TMDS_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
+#define regDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL0_FEEDBACK 0x20e6
+#define regDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20e7
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20e8
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20e9
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG0_TMDS_CTL_BITS 0x20eb
+#define regDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG0_TMDS_DCBALANCER_CONTROL 0x20ec
+#define regDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20ed
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL 0x20ee
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL 0x20ef
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_VERSION 0x20f1
+#define regDIG0_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x490
+#define regDP1_DP_LINK_CNTL 0x2242
+#define regDP1_DP_LINK_CNTL_BASE_IDX 2
+#define regDP1_DP_PIXEL_FORMAT 0x2243
+#define regDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP1_DP_MSA_COLORIMETRY 0x2244
+#define regDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP1_DP_CONFIG 0x2245
+#define regDP1_DP_CONFIG_BASE_IDX 2
+#define regDP1_DP_VID_STREAM_CNTL 0x2246
+#define regDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP1_DP_STEER_FIFO 0x2247
+#define regDP1_DP_STEER_FIFO_BASE_IDX 2
+#define regDP1_DP_MSA_MISC 0x2248
+#define regDP1_DP_MSA_MISC_BASE_IDX 2
+#define regDP1_DP_DPHY_INTERNAL_CTRL 0x2249
+#define regDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP1_DP_VID_TIMING 0x224a
+#define regDP1_DP_VID_TIMING_BASE_IDX 2
+#define regDP1_DP_VID_N 0x224b
+#define regDP1_DP_VID_N_BASE_IDX 2
+#define regDP1_DP_VID_M 0x224c
+#define regDP1_DP_VID_M_BASE_IDX 2
+#define regDP1_DP_LINK_FRAMING_CNTL 0x224d
+#define regDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP1_DP_HBR2_EYE_PATTERN 0x224e
+#define regDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP1_DP_VID_MSA_VBID 0x224f
+#define regDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP1_DP_VID_INTERRUPT_CNTL 0x2250
+#define regDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CNTL 0x2251
+#define regDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2252
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM0 0x2253
+#define regDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM1 0x2254
+#define regDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM2 0x2255
+#define regDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP1_DP_DPHY_8B10B_CNTL 0x2256
+#define regDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_PRBS_CNTL 0x2257
+#define regDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_SCRAM_CNTL 0x2258
+#define regDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_CONTROL0 0x2259
+#define regDP1_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_CONTROL1 0x225a
+#define regDP1_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT0 0x225b
+#define regDP1_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT1 0x225c
+#define regDP1_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_STATUS 0x225d
+#define regDP1_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING 0x225e
+#define regDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS 0x225f
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT2 0x2260
+#define regDP1_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT3 0x2261
+#define regDP1_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL 0x2265
+#define regDP1_DP_SEC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL1 0x2266
+#define regDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING1 0x2267
+#define regDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING2 0x2268
+#define regDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING3 0x2269
+#define regDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING4 0x226a
+#define regDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N 0x226b
+#define regDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N_READBACK 0x226c
+#define regDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M 0x226d
+#define regDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M_READBACK 0x226e
+#define regDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_TIMESTAMP 0x226f
+#define regDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP1_DP_SEC_PACKET_CNTL 0x2270
+#define regDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_CNTL 0x2271
+#define regDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_UPDATE 0x2273
+#define regDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0 0x2274
+#define regDP1_DP_MSE_SAT0_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1 0x2275
+#define regDP1_DP_MSE_SAT1_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2 0x2276
+#define regDP1_DP_MSE_SAT2_BASE_IDX 2
+#define regDP1_DP_MSE_SAT_UPDATE 0x2277
+#define regDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_LINK_TIMING 0x2278
+#define regDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP1_DP_MSE_MISC_CNTL 0x2279
+#define regDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x227e
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x227f
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0_STATUS 0x2281
+#define regDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1_STATUS 0x2282
+#define regDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2_STATUS 0x2283
+#define regDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP1_DP_DPIA_SPARE 0x2284
+#define regDP1_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM1 0x2286
+#define regDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM2 0x2287
+#define regDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM3 0x2288
+#define regDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM4 0x2289
+#define regDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL 0x228a
+#define regDP1_DP_MSO_CNTL_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL1 0x228b
+#define regDP1_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP1_DP_DSC_CNTL 0x228c
+#define regDP1_DP_DSC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL2 0x228d
+#define regDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL3 0x228e
+#define regDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL4 0x228f
+#define regDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL5 0x2290
+#define regDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL6 0x2291
+#define regDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL7 0x2292
+#define regDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP1_DP_DB_CNTL 0x2293
+#define regDP1_DP_DB_CNTL_BASE_IDX 2
+#define regDP1_DP_MSA_VBID_MISC 0x2294
+#define regDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP1_DP_SEC_METADATA_TRANSMISSION 0x2295
+#define regDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP1_DP_ALPM_CNTL 0x2297
+#define regDP1_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP8_CNTL 0x2298
+#define regDP1_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP9_CNTL 0x2299
+#define regDP1_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP10_CNTL 0x229a
+#define regDP1_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP11_CNTL 0x229b
+#define regDP1_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP_EN_DB_STATUS 0x229c
+#define regDP1_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL1 0x229d
+#define regDP1_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL2 0x229e
+#define regDP1_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL3 0x229f
+#define regDP1_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL4 0x22a0
+#define regDP1_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL5 0x22a1
+#define regDP1_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS 0x22a2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL 0x22a3
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0 0x22a4
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1 0x22a5
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL 0x22a6
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x490
+#define regDIG1_DIG_FE_CNTL 0x21b7
+#define regDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_CLK_CNTL 0x21b8
+#define regDIG1_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_EN_CNTL 0x21b9
+#define regDIG1_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_CNTL 0x21ba
+#define regDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_RESULT 0x21bb
+#define regDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG1_DIG_CLOCK_PATTERN 0x21bc
+#define regDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_TEST_PATTERN 0x21bd
+#define regDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_RANDOM_PATTERN_SEED 0x21be
+#define regDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL0 0x21bf
+#define regDIG1_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL1 0x21c0
+#define regDIG1_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL 0x21c1
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_CONTROL 0x21c2
+#define regDIG1_HDMI_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_STATUS 0x21c3
+#define regDIG1_HDMI_STATUS_BASE_IDX 2
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL 0x21c4
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_PACKET_CONTROL 0x21c5
+#define regDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_VBI_PACKET_CONTROL 0x21c6
+#define regDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL0 0x21c7
+#define regDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL1 0x21c8
+#define regDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x21c9
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6 0x21ca
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21cb
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG1_HDMI_GC 0x21cc
+#define regDIG1_HDMI_GC_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x21cd
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x21ce
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x21cf
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x21d0
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7 0x21d1
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8 0x21d2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9 0x21d3
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10 0x21d4
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG1_HDMI_DB_CONTROL 0x21d5
+#define regDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_0 0x21d6
+#define regDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_1 0x21d7
+#define regDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_0 0x21d8
+#define regDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_1 0x21d9
+#define regDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_0 0x21da
+#define regDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_1 0x21db
+#define regDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_0 0x21dc
+#define regDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_1 0x21dd
+#define regDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG1_AFMT_CNTL 0x21de
+#define regDIG1_AFMT_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CLK_CNTL 0x21df
+#define regDIG1_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CNTL 0x21e0
+#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_EN_CNTL 0x21e1
+#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CNTL 0x2208
+#define regDIG1_TMDS_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL_CHAR 0x2209
+#define regDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL0_FEEDBACK 0x220a
+#define regDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL 0x220b
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x220c
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x220d
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG1_TMDS_CTL_BITS 0x220f
+#define regDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG1_TMDS_DCBALANCER_CONTROL 0x2210
+#define regDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x2211
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL 0x2212
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL 0x2213
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_VERSION 0x2215
+#define regDIG1_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp2_dispdec
+// base address: 0x920
+#define regDP2_DP_LINK_CNTL 0x2366
+#define regDP2_DP_LINK_CNTL_BASE_IDX 2
+#define regDP2_DP_PIXEL_FORMAT 0x2367
+#define regDP2_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP2_DP_MSA_COLORIMETRY 0x2368
+#define regDP2_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP2_DP_CONFIG 0x2369
+#define regDP2_DP_CONFIG_BASE_IDX 2
+#define regDP2_DP_VID_STREAM_CNTL 0x236a
+#define regDP2_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP2_DP_STEER_FIFO 0x236b
+#define regDP2_DP_STEER_FIFO_BASE_IDX 2
+#define regDP2_DP_MSA_MISC 0x236c
+#define regDP2_DP_MSA_MISC_BASE_IDX 2
+#define regDP2_DP_DPHY_INTERNAL_CTRL 0x236d
+#define regDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP2_DP_VID_TIMING 0x236e
+#define regDP2_DP_VID_TIMING_BASE_IDX 2
+#define regDP2_DP_VID_N 0x236f
+#define regDP2_DP_VID_N_BASE_IDX 2
+#define regDP2_DP_VID_M 0x2370
+#define regDP2_DP_VID_M_BASE_IDX 2
+#define regDP2_DP_LINK_FRAMING_CNTL 0x2371
+#define regDP2_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP2_DP_HBR2_EYE_PATTERN 0x2372
+#define regDP2_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP2_DP_VID_MSA_VBID 0x2373
+#define regDP2_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP2_DP_VID_INTERRUPT_CNTL 0x2374
+#define regDP2_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CNTL 0x2375
+#define regDP2_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x2376
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM0 0x2377
+#define regDP2_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM1 0x2378
+#define regDP2_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM2 0x2379
+#define regDP2_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP2_DP_DPHY_8B10B_CNTL 0x237a
+#define regDP2_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_PRBS_CNTL 0x237b
+#define regDP2_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_SCRAM_CNTL 0x237c
+#define regDP2_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_CONTROL0 0x237d
+#define regDP2_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_CONTROL1 0x237e
+#define regDP2_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT0 0x237f
+#define regDP2_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT1 0x2380
+#define regDP2_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_STATUS 0x2381
+#define regDP2_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING 0x2382
+#define regDP2_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS 0x2383
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT2 0x2384
+#define regDP2_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT3 0x2385
+#define regDP2_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL 0x2389
+#define regDP2_DP_SEC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL1 0x238a
+#define regDP2_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING1 0x238b
+#define regDP2_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING2 0x238c
+#define regDP2_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING3 0x238d
+#define regDP2_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING4 0x238e
+#define regDP2_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N 0x238f
+#define regDP2_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N_READBACK 0x2390
+#define regDP2_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M 0x2391
+#define regDP2_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M_READBACK 0x2392
+#define regDP2_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_TIMESTAMP 0x2393
+#define regDP2_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP2_DP_SEC_PACKET_CNTL 0x2394
+#define regDP2_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_CNTL 0x2395
+#define regDP2_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_UPDATE 0x2397
+#define regDP2_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0 0x2398
+#define regDP2_DP_MSE_SAT0_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1 0x2399
+#define regDP2_DP_MSE_SAT1_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2 0x239a
+#define regDP2_DP_MSE_SAT2_BASE_IDX 2
+#define regDP2_DP_MSE_SAT_UPDATE 0x239b
+#define regDP2_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_LINK_TIMING 0x239c
+#define regDP2_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP2_DP_MSE_MISC_CNTL 0x239d
+#define regDP2_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x23a2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL 0x23a3
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0_STATUS 0x23a5
+#define regDP2_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1_STATUS 0x23a6
+#define regDP2_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2_STATUS 0x23a7
+#define regDP2_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP2_DP_DPIA_SPARE 0x23a8
+#define regDP2_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM1 0x23aa
+#define regDP2_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM2 0x23ab
+#define regDP2_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM3 0x23ac
+#define regDP2_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM4 0x23ad
+#define regDP2_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL 0x23ae
+#define regDP2_DP_MSO_CNTL_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL1 0x23af
+#define regDP2_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP2_DP_DSC_CNTL 0x23b0
+#define regDP2_DP_DSC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL2 0x23b1
+#define regDP2_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL3 0x23b2
+#define regDP2_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL4 0x23b3
+#define regDP2_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL5 0x23b4
+#define regDP2_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL6 0x23b5
+#define regDP2_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL7 0x23b6
+#define regDP2_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP2_DP_DB_CNTL 0x23b7
+#define regDP2_DP_DB_CNTL_BASE_IDX 2
+#define regDP2_DP_MSA_VBID_MISC 0x23b8
+#define regDP2_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP2_DP_SEC_METADATA_TRANSMISSION 0x23b9
+#define regDP2_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP2_DP_ALPM_CNTL 0x23bb
+#define regDP2_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP8_CNTL 0x23bc
+#define regDP2_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP9_CNTL 0x23bd
+#define regDP2_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP10_CNTL 0x23be
+#define regDP2_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP11_CNTL 0x23bf
+#define regDP2_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP_EN_DB_STATUS 0x23c0
+#define regDP2_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL1 0x23c1
+#define regDP2_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2 0x23c2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL3 0x23c3
+#define regDP2_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL4 0x23c4
+#define regDP2_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL5 0x23c5
+#define regDP2_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS 0x23c6
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL 0x23c7
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0 0x23c8
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1 0x23c9
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL 0x23ca
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+// base address: 0x920
+#define regDIG2_DIG_FE_CNTL 0x22db
+#define regDIG2_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_CLK_CNTL 0x22dc
+#define regDIG2_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_EN_CNTL 0x22dd
+#define regDIG2_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_CNTL 0x22de
+#define regDIG2_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_RESULT 0x22df
+#define regDIG2_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG2_DIG_CLOCK_PATTERN 0x22e0
+#define regDIG2_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_TEST_PATTERN 0x22e1
+#define regDIG2_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED 0x22e2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL0 0x22e3
+#define regDIG2_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL1 0x22e4
+#define regDIG2_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL 0x22e5
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_CONTROL 0x22e6
+#define regDIG2_HDMI_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_STATUS 0x22e7
+#define regDIG2_HDMI_STATUS_BASE_IDX 2
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL 0x22e8
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_PACKET_CONTROL 0x22e9
+#define regDIG2_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_VBI_PACKET_CONTROL 0x22ea
+#define regDIG2_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL0 0x22eb
+#define regDIG2_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL1 0x22ec
+#define regDIG2_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x22ed
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6 0x22ee
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5 0x22ef
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG2_HDMI_GC 0x22f0
+#define regDIG2_HDMI_GC_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x22f1
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2 0x22f2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3 0x22f3
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4 0x22f4
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7 0x22f5
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8 0x22f6
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9 0x22f7
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10 0x22f8
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG2_HDMI_DB_CONTROL 0x22f9
+#define regDIG2_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_0 0x22fa
+#define regDIG2_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_1 0x22fb
+#define regDIG2_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_0 0x22fc
+#define regDIG2_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_1 0x22fd
+#define regDIG2_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_0 0x22fe
+#define regDIG2_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_1 0x22ff
+#define regDIG2_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_0 0x2300
+#define regDIG2_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_1 0x2301
+#define regDIG2_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG2_AFMT_CNTL 0x2302
+#define regDIG2_AFMT_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CLK_CNTL 0x2303
+#define regDIG2_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CNTL 0x2304
+#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_EN_CNTL 0x2305
+#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CNTL 0x232c
+#define regDIG2_TMDS_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL_CHAR 0x232d
+#define regDIG2_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL0_FEEDBACK 0x232e
+#define regDIG2_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL 0x232f
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x2330
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x2331
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG2_TMDS_CTL_BITS 0x2333
+#define regDIG2_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG2_TMDS_DCBALANCER_CONTROL 0x2334
+#define regDIG2_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR 0x2335
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL 0x2336
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL 0x2337
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_VERSION 0x2339
+#define regDIG2_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp3_dispdec
+// base address: 0xdb0
+#define regDP3_DP_LINK_CNTL 0x248a
+#define regDP3_DP_LINK_CNTL_BASE_IDX 2
+#define regDP3_DP_PIXEL_FORMAT 0x248b
+#define regDP3_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP3_DP_MSA_COLORIMETRY 0x248c
+#define regDP3_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP3_DP_CONFIG 0x248d
+#define regDP3_DP_CONFIG_BASE_IDX 2
+#define regDP3_DP_VID_STREAM_CNTL 0x248e
+#define regDP3_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP3_DP_STEER_FIFO 0x248f
+#define regDP3_DP_STEER_FIFO_BASE_IDX 2
+#define regDP3_DP_MSA_MISC 0x2490
+#define regDP3_DP_MSA_MISC_BASE_IDX 2
+#define regDP3_DP_DPHY_INTERNAL_CTRL 0x2491
+#define regDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP3_DP_VID_TIMING 0x2492
+#define regDP3_DP_VID_TIMING_BASE_IDX 2
+#define regDP3_DP_VID_N 0x2493
+#define regDP3_DP_VID_N_BASE_IDX 2
+#define regDP3_DP_VID_M 0x2494
+#define regDP3_DP_VID_M_BASE_IDX 2
+#define regDP3_DP_LINK_FRAMING_CNTL 0x2495
+#define regDP3_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP3_DP_HBR2_EYE_PATTERN 0x2496
+#define regDP3_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP3_DP_VID_MSA_VBID 0x2497
+#define regDP3_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP3_DP_VID_INTERRUPT_CNTL 0x2498
+#define regDP3_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CNTL 0x2499
+#define regDP3_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x249a
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM0 0x249b
+#define regDP3_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM1 0x249c
+#define regDP3_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM2 0x249d
+#define regDP3_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP3_DP_DPHY_8B10B_CNTL 0x249e
+#define regDP3_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_PRBS_CNTL 0x249f
+#define regDP3_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_SCRAM_CNTL 0x24a0
+#define regDP3_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_CONTROL0 0x24a1
+#define regDP3_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_CONTROL1 0x24a2
+#define regDP3_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT0 0x24a3
+#define regDP3_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT1 0x24a4
+#define regDP3_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_STATUS 0x24a5
+#define regDP3_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING 0x24a6
+#define regDP3_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS 0x24a7
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT2 0x24a8
+#define regDP3_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT3 0x24a9
+#define regDP3_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL 0x24ad
+#define regDP3_DP_SEC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL1 0x24ae
+#define regDP3_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING1 0x24af
+#define regDP3_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING2 0x24b0
+#define regDP3_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING3 0x24b1
+#define regDP3_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING4 0x24b2
+#define regDP3_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N 0x24b3
+#define regDP3_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N_READBACK 0x24b4
+#define regDP3_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M 0x24b5
+#define regDP3_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M_READBACK 0x24b6
+#define regDP3_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_TIMESTAMP 0x24b7
+#define regDP3_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP3_DP_SEC_PACKET_CNTL 0x24b8
+#define regDP3_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_CNTL 0x24b9
+#define regDP3_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_UPDATE 0x24bb
+#define regDP3_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0 0x24bc
+#define regDP3_DP_MSE_SAT0_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1 0x24bd
+#define regDP3_DP_MSE_SAT1_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2 0x24be
+#define regDP3_DP_MSE_SAT2_BASE_IDX 2
+#define regDP3_DP_MSE_SAT_UPDATE 0x24bf
+#define regDP3_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_LINK_TIMING 0x24c0
+#define regDP3_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP3_DP_MSE_MISC_CNTL 0x24c1
+#define regDP3_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x24c6
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL 0x24c7
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0_STATUS 0x24c9
+#define regDP3_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1_STATUS 0x24ca
+#define regDP3_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2_STATUS 0x24cb
+#define regDP3_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP3_DP_DPIA_SPARE 0x24cc
+#define regDP3_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM1 0x24ce
+#define regDP3_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM2 0x24cf
+#define regDP3_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM3 0x24d0
+#define regDP3_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM4 0x24d1
+#define regDP3_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL 0x24d2
+#define regDP3_DP_MSO_CNTL_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL1 0x24d3
+#define regDP3_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP3_DP_DSC_CNTL 0x24d4
+#define regDP3_DP_DSC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL2 0x24d5
+#define regDP3_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL3 0x24d6
+#define regDP3_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL4 0x24d7
+#define regDP3_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL5 0x24d8
+#define regDP3_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL6 0x24d9
+#define regDP3_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL7 0x24da
+#define regDP3_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP3_DP_DB_CNTL 0x24db
+#define regDP3_DP_DB_CNTL_BASE_IDX 2
+#define regDP3_DP_MSA_VBID_MISC 0x24dc
+#define regDP3_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP3_DP_SEC_METADATA_TRANSMISSION 0x24dd
+#define regDP3_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP3_DP_ALPM_CNTL 0x24df
+#define regDP3_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP8_CNTL 0x24e0
+#define regDP3_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP9_CNTL 0x24e1
+#define regDP3_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP10_CNTL 0x24e2
+#define regDP3_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP11_CNTL 0x24e3
+#define regDP3_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP_EN_DB_STATUS 0x24e4
+#define regDP3_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL1 0x24e5
+#define regDP3_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL2 0x24e6
+#define regDP3_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL3 0x24e7
+#define regDP3_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL4 0x24e8
+#define regDP3_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL5 0x24e9
+#define regDP3_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS 0x24ea
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL 0x24eb
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0 0x24ec
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1 0x24ed
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL 0x24ee
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+// base address: 0xdb0
+#define regDIG3_DIG_FE_CNTL 0x23ff
+#define regDIG3_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_CLK_CNTL 0x2400
+#define regDIG3_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_EN_CNTL 0x2401
+#define regDIG3_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_CNTL 0x2402
+#define regDIG3_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_RESULT 0x2403
+#define regDIG3_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG3_DIG_CLOCK_PATTERN 0x2404
+#define regDIG3_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_TEST_PATTERN 0x2405
+#define regDIG3_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_RANDOM_PATTERN_SEED 0x2406
+#define regDIG3_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL0 0x2407
+#define regDIG3_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL1 0x2408
+#define regDIG3_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL 0x2409
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_CONTROL 0x240a
+#define regDIG3_HDMI_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_STATUS 0x240b
+#define regDIG3_HDMI_STATUS_BASE_IDX 2
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL 0x240c
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_PACKET_CONTROL 0x240d
+#define regDIG3_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_VBI_PACKET_CONTROL 0x240e
+#define regDIG3_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL0 0x240f
+#define regDIG3_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL1 0x2410
+#define regDIG3_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x2411
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6 0x2412
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5 0x2413
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG3_HDMI_GC 0x2414
+#define regDIG3_HDMI_GC_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x2415
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2 0x2416
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3 0x2417
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4 0x2418
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7 0x2419
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8 0x241a
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9 0x241b
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10 0x241c
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG3_HDMI_DB_CONTROL 0x241d
+#define regDIG3_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_0 0x241e
+#define regDIG3_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_1 0x241f
+#define regDIG3_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_0 0x2420
+#define regDIG3_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_1 0x2421
+#define regDIG3_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_0 0x2422
+#define regDIG3_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_1 0x2423
+#define regDIG3_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_0 0x2424
+#define regDIG3_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_1 0x2425
+#define regDIG3_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG3_AFMT_CNTL 0x2426
+#define regDIG3_AFMT_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CLK_CNTL 0x2427
+#define regDIG3_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CNTL 0x2428
+#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_EN_CNTL 0x2429
+#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CNTL 0x2450
+#define regDIG3_TMDS_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL_CHAR 0x2451
+#define regDIG3_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL0_FEEDBACK 0x2452
+#define regDIG3_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL 0x2453
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x2454
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x2455
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG3_TMDS_CTL_BITS 0x2457
+#define regDIG3_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG3_TMDS_DCBALANCER_CONTROL 0x2458
+#define regDIG3_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR 0x2459
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL 0x245a
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL 0x245b
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_VERSION 0x245d
+#define regDIG3_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+// base address: 0x1240
+#define regDP4_DP_LINK_CNTL 0x25ae
+#define regDP4_DP_LINK_CNTL_BASE_IDX 2
+#define regDP4_DP_PIXEL_FORMAT 0x25af
+#define regDP4_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP4_DP_MSA_COLORIMETRY 0x25b0
+#define regDP4_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP4_DP_CONFIG 0x25b1
+#define regDP4_DP_CONFIG_BASE_IDX 2
+#define regDP4_DP_VID_STREAM_CNTL 0x25b2
+#define regDP4_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP4_DP_STEER_FIFO 0x25b3
+#define regDP4_DP_STEER_FIFO_BASE_IDX 2
+#define regDP4_DP_MSA_MISC 0x25b4
+#define regDP4_DP_MSA_MISC_BASE_IDX 2
+#define regDP4_DP_DPHY_INTERNAL_CTRL 0x25b5
+#define regDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP4_DP_VID_TIMING 0x25b6
+#define regDP4_DP_VID_TIMING_BASE_IDX 2
+#define regDP4_DP_VID_N 0x25b7
+#define regDP4_DP_VID_N_BASE_IDX 2
+#define regDP4_DP_VID_M 0x25b8
+#define regDP4_DP_VID_M_BASE_IDX 2
+#define regDP4_DP_LINK_FRAMING_CNTL 0x25b9
+#define regDP4_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP4_DP_HBR2_EYE_PATTERN 0x25ba
+#define regDP4_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP4_DP_VID_MSA_VBID 0x25bb
+#define regDP4_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP4_DP_VID_INTERRUPT_CNTL 0x25bc
+#define regDP4_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CNTL 0x25bd
+#define regDP4_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x25be
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM0 0x25bf
+#define regDP4_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM1 0x25c0
+#define regDP4_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM2 0x25c1
+#define regDP4_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP4_DP_DPHY_8B10B_CNTL 0x25c2
+#define regDP4_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_PRBS_CNTL 0x25c3
+#define regDP4_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_SCRAM_CNTL 0x25c4
+#define regDP4_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_CONTROL0 0x25c5
+#define regDP4_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_CONTROL1 0x25c6
+#define regDP4_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT0 0x25c7
+#define regDP4_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT1 0x25c8
+#define regDP4_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_STATUS 0x25c9
+#define regDP4_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING 0x25ca
+#define regDP4_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS 0x25cb
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT2 0x25cc
+#define regDP4_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT3 0x25cd
+#define regDP4_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL 0x25d1
+#define regDP4_DP_SEC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL1 0x25d2
+#define regDP4_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING1 0x25d3
+#define regDP4_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING2 0x25d4
+#define regDP4_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING3 0x25d5
+#define regDP4_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING4 0x25d6
+#define regDP4_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N 0x25d7
+#define regDP4_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N_READBACK 0x25d8
+#define regDP4_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M 0x25d9
+#define regDP4_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M_READBACK 0x25da
+#define regDP4_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_TIMESTAMP 0x25db
+#define regDP4_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP4_DP_SEC_PACKET_CNTL 0x25dc
+#define regDP4_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_CNTL 0x25dd
+#define regDP4_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_UPDATE 0x25df
+#define regDP4_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0 0x25e0
+#define regDP4_DP_MSE_SAT0_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1 0x25e1
+#define regDP4_DP_MSE_SAT1_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2 0x25e2
+#define regDP4_DP_MSE_SAT2_BASE_IDX 2
+#define regDP4_DP_MSE_SAT_UPDATE 0x25e3
+#define regDP4_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_LINK_TIMING 0x25e4
+#define regDP4_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP4_DP_MSE_MISC_CNTL 0x25e5
+#define regDP4_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x25ea
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL 0x25eb
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0_STATUS 0x25ed
+#define regDP4_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1_STATUS 0x25ee
+#define regDP4_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2_STATUS 0x25ef
+#define regDP4_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP4_DP_DPIA_SPARE 0x25f0
+#define regDP4_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM1 0x25f2
+#define regDP4_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM2 0x25f3
+#define regDP4_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM3 0x25f4
+#define regDP4_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM4 0x25f5
+#define regDP4_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL 0x25f6
+#define regDP4_DP_MSO_CNTL_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL1 0x25f7
+#define regDP4_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP4_DP_DSC_CNTL 0x25f8
+#define regDP4_DP_DSC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL2 0x25f9
+#define regDP4_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL3 0x25fa
+#define regDP4_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL4 0x25fb
+#define regDP4_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL5 0x25fc
+#define regDP4_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL6 0x25fd
+#define regDP4_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL7 0x25fe
+#define regDP4_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP4_DP_DB_CNTL 0x25ff
+#define regDP4_DP_DB_CNTL_BASE_IDX 2
+#define regDP4_DP_MSA_VBID_MISC 0x2600
+#define regDP4_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP4_DP_SEC_METADATA_TRANSMISSION 0x2601
+#define regDP4_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP4_DP_ALPM_CNTL 0x2603
+#define regDP4_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP8_CNTL 0x2604
+#define regDP4_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP9_CNTL 0x2605
+#define regDP4_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP10_CNTL 0x2606
+#define regDP4_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP11_CNTL 0x2607
+#define regDP4_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP_EN_DB_STATUS 0x2608
+#define regDP4_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL1 0x2609
+#define regDP4_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL2 0x260a
+#define regDP4_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL3 0x260b
+#define regDP4_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL4 0x260c
+#define regDP4_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL5 0x260d
+#define regDP4_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS 0x260e
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL 0x260f
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0 0x2610
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1 0x2611
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL 0x2612
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dispdec
+// base address: 0x1240
+#define regDIG4_DIG_FE_CNTL 0x2523
+#define regDIG4_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_CLK_CNTL 0x2524
+#define regDIG4_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_EN_CNTL 0x2525
+#define regDIG4_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_CNTL 0x2526
+#define regDIG4_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_RESULT 0x2527
+#define regDIG4_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG4_DIG_CLOCK_PATTERN 0x2528
+#define regDIG4_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_TEST_PATTERN 0x2529
+#define regDIG4_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_RANDOM_PATTERN_SEED 0x252a
+#define regDIG4_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL0 0x252b
+#define regDIG4_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL1 0x252c
+#define regDIG4_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL 0x252d
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_CONTROL 0x252e
+#define regDIG4_HDMI_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_STATUS 0x252f
+#define regDIG4_HDMI_STATUS_BASE_IDX 2
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL 0x2530
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_PACKET_CONTROL 0x2531
+#define regDIG4_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_VBI_PACKET_CONTROL 0x2532
+#define regDIG4_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL0 0x2533
+#define regDIG4_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL1 0x2534
+#define regDIG4_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x2535
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6 0x2536
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5 0x2537
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG4_HDMI_GC 0x2538
+#define regDIG4_HDMI_GC_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x2539
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2 0x253a
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3 0x253b
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4 0x253c
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7 0x253d
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8 0x253e
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9 0x253f
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10 0x2540
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG4_HDMI_DB_CONTROL 0x2541
+#define regDIG4_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_0 0x2542
+#define regDIG4_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_1 0x2543
+#define regDIG4_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_0 0x2544
+#define regDIG4_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_1 0x2545
+#define regDIG4_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_0 0x2546
+#define regDIG4_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_1 0x2547
+#define regDIG4_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_0 0x2548
+#define regDIG4_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_1 0x2549
+#define regDIG4_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG4_AFMT_CNTL 0x254a
+#define regDIG4_AFMT_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CLK_CNTL 0x254b
+#define regDIG4_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CNTL 0x254c
+#define regDIG4_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_EN_CNTL 0x254d
+#define regDIG4_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CNTL 0x2574
+#define regDIG4_TMDS_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL_CHAR 0x2575
+#define regDIG4_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL0_FEEDBACK 0x2576
+#define regDIG4_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL 0x2577
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x2578
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x2579
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG4_TMDS_CTL_BITS 0x257b
+#define regDIG4_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG4_TMDS_DCBALANCER_CONTROL 0x257c
+#define regDIG4_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR 0x257d
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL 0x257e
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL 0x257f
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_VERSION 0x2581
+#define regDIG4_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_afmt_afmt_dispdec
+// base address: 0x154cc
+#define regAFMT0_AFMT_ACP 0x2073
+#define regAFMT0_AFMT_ACP_BASE_IDX 2
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL 0x2074
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2 0x2075
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO0 0x2076
+#define regAFMT0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO1 0x2077
+#define regAFMT0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT0_AFMT_60958_0 0x2078
+#define regAFMT0_AFMT_60958_0_BASE_IDX 2
+#define regAFMT0_AFMT_60958_1 0x2079
+#define regAFMT0_AFMT_60958_1_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL 0x207a
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL0 0x207b
+#define regAFMT0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL1 0x207c
+#define regAFMT0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL2 0x207d
+#define regAFMT0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL3 0x207e
+#define regAFMT0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT0_AFMT_60958_2 0x207f
+#define regAFMT0_AFMT_60958_2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT 0x2080
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT0_AFMT_STATUS 0x2081
+#define regAFMT0_AFMT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL 0x2082
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0 0x2083
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_INTERRUPT_STATUS 0x2084
+#define regAFMT0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL 0x2085
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_MEM_PWR 0x2087
+#define regAFMT0_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_afmt_afmt_dispdec
+// base address: 0x1595c
+#define regAFMT1_AFMT_ACP 0x2197
+#define regAFMT1_AFMT_ACP_BASE_IDX 2
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL 0x2198
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2 0x2199
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO0 0x219a
+#define regAFMT1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO1 0x219b
+#define regAFMT1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT1_AFMT_60958_0 0x219c
+#define regAFMT1_AFMT_60958_0_BASE_IDX 2
+#define regAFMT1_AFMT_60958_1 0x219d
+#define regAFMT1_AFMT_60958_1_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL 0x219e
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL0 0x219f
+#define regAFMT1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL1 0x21a0
+#define regAFMT1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL2 0x21a1
+#define regAFMT1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL3 0x21a2
+#define regAFMT1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT1_AFMT_60958_2 0x21a3
+#define regAFMT1_AFMT_60958_2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT 0x21a4
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT1_AFMT_STATUS 0x21a5
+#define regAFMT1_AFMT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL 0x21a6
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0 0x21a7
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_INTERRUPT_STATUS 0x21a8
+#define regAFMT1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL 0x21a9
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_MEM_PWR 0x21ab
+#define regAFMT1_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_afmt_afmt_dispdec
+// base address: 0x15dec
+#define regAFMT2_AFMT_ACP 0x22bb
+#define regAFMT2_AFMT_ACP_BASE_IDX 2
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL 0x22bc
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2 0x22bd
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO0 0x22be
+#define regAFMT2_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO1 0x22bf
+#define regAFMT2_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT2_AFMT_60958_0 0x22c0
+#define regAFMT2_AFMT_60958_0_BASE_IDX 2
+#define regAFMT2_AFMT_60958_1 0x22c1
+#define regAFMT2_AFMT_60958_1_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL 0x22c2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL0 0x22c3
+#define regAFMT2_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL1 0x22c4
+#define regAFMT2_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL2 0x22c5
+#define regAFMT2_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL3 0x22c6
+#define regAFMT2_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT2_AFMT_60958_2 0x22c7
+#define regAFMT2_AFMT_60958_2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT 0x22c8
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT2_AFMT_STATUS 0x22c9
+#define regAFMT2_AFMT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL 0x22ca
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0 0x22cb
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_INTERRUPT_STATUS 0x22cc
+#define regAFMT2_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL 0x22cd
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_MEM_PWR 0x22cf
+#define regAFMT2_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_afmt_afmt_dispdec
+// base address: 0x1627c
+#define regAFMT3_AFMT_ACP 0x23df
+#define regAFMT3_AFMT_ACP_BASE_IDX 2
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL 0x23e0
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2 0x23e1
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO0 0x23e2
+#define regAFMT3_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO1 0x23e3
+#define regAFMT3_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT3_AFMT_60958_0 0x23e4
+#define regAFMT3_AFMT_60958_0_BASE_IDX 2
+#define regAFMT3_AFMT_60958_1 0x23e5
+#define regAFMT3_AFMT_60958_1_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL 0x23e6
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL0 0x23e7
+#define regAFMT3_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL1 0x23e8
+#define regAFMT3_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL2 0x23e9
+#define regAFMT3_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL3 0x23ea
+#define regAFMT3_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT3_AFMT_60958_2 0x23eb
+#define regAFMT3_AFMT_60958_2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT 0x23ec
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT3_AFMT_STATUS 0x23ed
+#define regAFMT3_AFMT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL 0x23ee
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0 0x23ef
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_INTERRUPT_STATUS 0x23f0
+#define regAFMT3_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL 0x23f1
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_MEM_PWR 0x23f3
+#define regAFMT3_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_afmt_afmt_dispdec
+// base address: 0x1670c
+#define regAFMT4_AFMT_ACP 0x2503
+#define regAFMT4_AFMT_ACP_BASE_IDX 2
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL 0x2504
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2 0x2505
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO0 0x2506
+#define regAFMT4_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO1 0x2507
+#define regAFMT4_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT4_AFMT_60958_0 0x2508
+#define regAFMT4_AFMT_60958_0_BASE_IDX 2
+#define regAFMT4_AFMT_60958_1 0x2509
+#define regAFMT4_AFMT_60958_1_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL 0x250a
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL0 0x250b
+#define regAFMT4_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL1 0x250c
+#define regAFMT4_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL2 0x250d
+#define regAFMT4_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL3 0x250e
+#define regAFMT4_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT4_AFMT_60958_2 0x250f
+#define regAFMT4_AFMT_60958_2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT 0x2510
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT4_AFMT_STATUS 0x2511
+#define regAFMT4_AFMT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL 0x2512
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0 0x2513
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_INTERRUPT_STATUS 0x2514
+#define regAFMT4_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL 0x2515
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_MEM_PWR 0x2517
+#define regAFMT4_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dme_dme_dispdec
+// base address: 0x15544
+#define regDME0_DME_CONTROL 0x2091
+#define regDME0_DME_CONTROL_BASE_IDX 2
+#define regDME0_DME_MEMORY_CONTROL 0x2092
+#define regDME0_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_vpg_vpg_dispdec
+// base address: 0x154a0
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL 0x2068
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_PACKET_DATA 0x2069
+#define regVPG0_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL 0x206a
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x206b
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_STATUS 0x206c
+#define regVPG0_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG0_VPG_MEM_PWR 0x206d
+#define regVPG0_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL 0x206e
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_DATA 0x206f
+#define regVPG0_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO0 0x2070
+#define regVPG0_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO1 0x2071
+#define regVPG0_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dme_dme_dispdec
+// base address: 0x159d4
+#define regDME1_DME_CONTROL 0x21b5
+#define regDME1_DME_CONTROL_BASE_IDX 2
+#define regDME1_DME_MEMORY_CONTROL 0x21b6
+#define regDME1_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_vpg_vpg_dispdec
+// base address: 0x15930
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL 0x218c
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_PACKET_DATA 0x218d
+#define regVPG1_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL 0x218e
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x218f
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_STATUS 0x2190
+#define regVPG1_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG1_VPG_MEM_PWR 0x2191
+#define regVPG1_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL 0x2192
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_DATA 0x2193
+#define regVPG1_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO0 0x2194
+#define regVPG1_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO1 0x2195
+#define regVPG1_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dme_dme_dispdec
+// base address: 0x15e64
+#define regDME2_DME_CONTROL 0x22d9
+#define regDME2_DME_CONTROL_BASE_IDX 2
+#define regDME2_DME_MEMORY_CONTROL 0x22da
+#define regDME2_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_vpg_vpg_dispdec
+// base address: 0x15dc0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL 0x22b0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_PACKET_DATA 0x22b1
+#define regVPG2_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL 0x22b2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x22b3
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_STATUS 0x22b4
+#define regVPG2_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG2_VPG_MEM_PWR 0x22b5
+#define regVPG2_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL 0x22b6
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_DATA 0x22b7
+#define regVPG2_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO0 0x22b8
+#define regVPG2_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO1 0x22b9
+#define regVPG2_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dme_dme_dispdec
+// base address: 0x162f4
+#define regDME3_DME_CONTROL 0x23fd
+#define regDME3_DME_CONTROL_BASE_IDX 2
+#define regDME3_DME_MEMORY_CONTROL 0x23fe
+#define regDME3_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_vpg_vpg_dispdec
+// base address: 0x16250
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL 0x23d4
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_PACKET_DATA 0x23d5
+#define regVPG3_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL 0x23d6
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x23d7
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_STATUS 0x23d8
+#define regVPG3_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG3_VPG_MEM_PWR 0x23d9
+#define regVPG3_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL 0x23da
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_DATA 0x23db
+#define regVPG3_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO0 0x23dc
+#define regVPG3_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO1 0x23dd
+#define regVPG3_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dme_dme_dispdec
+// base address: 0x16784
+#define regDME4_DME_CONTROL 0x2521
+#define regDME4_DME_CONTROL_BASE_IDX 2
+#define regDME4_DME_MEMORY_CONTROL 0x2522
+#define regDME4_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_vpg_vpg_dispdec
+// base address: 0x166e0
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL 0x24f8
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_PACKET_DATA 0x24f9
+#define regVPG4_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL 0x24fa
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x24fb
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_STATUS 0x24fc
+#define regVPG4_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG4_VPG_MEM_PWR 0x24fd
+#define regVPG4_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL 0x24fe
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_DATA 0x24ff
+#define regVPG4_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO0 0x2500
+#define regVPG4_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO1 0x2501
+#define regVPG4_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define regDP_AUX0_AUX_CONTROL 0x1f50
+#define regDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define regDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define regDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_STATUS 0x1f54
+#define regDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_STATUS 0x1f55
+#define regDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_DATA 0x1f56
+#define regDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_DATA 0x1f57
+#define regDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define regDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define regDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL 0x1f5e
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL 0x1f5f
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f60
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS 0x1f61
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL 0x1f66
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define regDP_AUX1_AUX_CONTROL 0x1f6c
+#define regDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define regDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define regDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_STATUS 0x1f70
+#define regDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_STATUS 0x1f71
+#define regDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_DATA 0x1f72
+#define regDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_DATA 0x1f73
+#define regDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define regDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define regDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL 0x1f7a
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL 0x1f7b
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f7c
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS 0x1f7d
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL 0x1f82
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+// base address: 0xe0
+#define regDP_AUX2_AUX_CONTROL 0x1f88
+#define regDP_AUX2_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_CONTROL 0x1f89
+#define regDP_AUX2_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_ARB_CONTROL 0x1f8a
+#define regDP_AUX2_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL 0x1f8b
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_STATUS 0x1f8c
+#define regDP_AUX2_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_STATUS 0x1f8d
+#define regDP_AUX2_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_DATA 0x1f8e
+#define regDP_AUX2_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_DATA 0x1f8f
+#define regDP_AUX2_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x1f90
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL 0x1f91
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0 0x1f92
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1 0x1f93
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_STATUS 0x1f94
+#define regDP_AUX2_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_STATUS 0x1f95
+#define regDP_AUX2_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL 0x1f96
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL 0x1f97
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f98
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS 0x1f99
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL 0x1f9e
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+// base address: 0x150
+#define regDP_AUX3_AUX_CONTROL 0x1fa4
+#define regDP_AUX3_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_CONTROL 0x1fa5
+#define regDP_AUX3_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_ARB_CONTROL 0x1fa6
+#define regDP_AUX3_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL 0x1fa7
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_STATUS 0x1fa8
+#define regDP_AUX3_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_STATUS 0x1fa9
+#define regDP_AUX3_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_DATA 0x1faa
+#define regDP_AUX3_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_DATA 0x1fab
+#define regDP_AUX3_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x1fac
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL 0x1fad
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0 0x1fae
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1 0x1faf
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_STATUS 0x1fb0
+#define regDP_AUX3_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_STATUS 0x1fb1
+#define regDP_AUX3_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL 0x1fb2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL 0x1fb3
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fb4
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS 0x1fb5
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL 0x1fba
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+// base address: 0x1c0
+#define regDP_AUX4_AUX_CONTROL 0x1fc0
+#define regDP_AUX4_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_CONTROL 0x1fc1
+#define regDP_AUX4_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_ARB_CONTROL 0x1fc2
+#define regDP_AUX4_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL 0x1fc3
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_STATUS 0x1fc4
+#define regDP_AUX4_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_STATUS 0x1fc5
+#define regDP_AUX4_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_DATA 0x1fc6
+#define regDP_AUX4_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_DATA 0x1fc7
+#define regDP_AUX4_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x1fc8
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL 0x1fc9
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0 0x1fca
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1 0x1fcb
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_STATUS 0x1fcc
+#define regDP_AUX4_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_STATUS 0x1fcd
+#define regDP_AUX4_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL 0x1fce
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL 0x1fcf
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fd0
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS 0x1fd1
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL 0x1fd6
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux0_dispdec
+// base address: 0x14de0
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL 0x1eb8
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux1_dispdec
+// base address: 0x14de4
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL 0x1eb9
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux2_dispdec
+// base address: 0x14de8
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL 0x1eba
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux3_dispdec
+// base address: 0x14dec
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL 0x1ebb
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define regDC_I2C_CONTROL 0x1e98
+#define regDC_I2C_CONTROL_BASE_IDX 2
+#define regDC_I2C_ARBITRATION 0x1e99
+#define regDC_I2C_ARBITRATION_BASE_IDX 2
+#define regDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define regDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDC_I2C_SW_STATUS 0x1e9b
+#define regDC_I2C_SW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define regDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define regDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC3_HW_STATUS 0x1e9e
+#define regDC_I2C_DDC3_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC4_HW_STATUS 0x1e9f
+#define regDC_I2C_DDC4_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC5_HW_STATUS 0x1ea0
+#define regDC_I2C_DDC5_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_SPEED 0x1ea2
+#define regDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC1_SETUP 0x1ea3
+#define regDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC2_SPEED 0x1ea4
+#define regDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC2_SETUP 0x1ea5
+#define regDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC3_SPEED 0x1ea6
+#define regDC_I2C_DDC3_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC3_SETUP 0x1ea7
+#define regDC_I2C_DDC3_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC4_SPEED 0x1ea8
+#define regDC_I2C_DDC4_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC4_SETUP 0x1ea9
+#define regDC_I2C_DDC4_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC5_SPEED 0x1eaa
+#define regDC_I2C_DDC5_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC5_SETUP 0x1eab
+#define regDC_I2C_DDC5_SETUP_BASE_IDX 2
+#define regDC_I2C_TRANSACTION0 0x1eae
+#define regDC_I2C_TRANSACTION0_BASE_IDX 2
+#define regDC_I2C_TRANSACTION1 0x1eaf
+#define regDC_I2C_TRANSACTION1_BASE_IDX 2
+#define regDC_I2C_TRANSACTION2 0x1eb0
+#define regDC_I2C_TRANSACTION2_BASE_IDX 2
+#define regDC_I2C_TRANSACTION3 0x1eb1
+#define regDC_I2C_TRANSACTION3_BASE_IDX 2
+#define regDC_I2C_DATA 0x1eb2
+#define regDC_I2C_DATA_BASE_IDX 2
+#define regDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define regDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define regDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define regDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define regDIO_DCN_STATUS 0x1ec3
+#define regDIO_DCN_STATUS_BASE_IDX 2
+#define regDIO_SCRATCH0 0x1eca
+#define regDIO_SCRATCH0_BASE_IDX 2
+#define regDIO_SCRATCH1 0x1ecb
+#define regDIO_SCRATCH1_BASE_IDX 2
+#define regDIO_SCRATCH2 0x1ecc
+#define regDIO_SCRATCH2_BASE_IDX 2
+#define regDIO_SCRATCH3 0x1ecd
+#define regDIO_SCRATCH3_BASE_IDX 2
+#define regDIO_SCRATCH4 0x1ece
+#define regDIO_SCRATCH4_BASE_IDX 2
+#define regDIO_SCRATCH5 0x1ecf
+#define regDIO_SCRATCH5_BASE_IDX 2
+#define regDIO_SCRATCH6 0x1ed0
+#define regDIO_SCRATCH6_BASE_IDX 2
+#define regDIO_SCRATCH7 0x1ed1
+#define regDIO_SCRATCH7_BASE_IDX 2
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS 0x1ed3
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_STATUS 0x1edd
+#define regDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL 0x1ede
+#define regDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL2 0x1edf
+#define regDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define regDIO_CLK_CNTL 0x1ee0
+#define regDIO_CLK_CNTL_BASE_IDX 2
+#define regDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define regDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_STATUS 0x1f00
+#define regDIO_PSP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_CLEAR 0x1f01
+#define regDIO_PSP_INTERRUPT_CLEAR_BASE_IDX 2
+#define regDIO_STATUS 0x1f02
+#define regDIO_STATUS_BASE_IDX 2
+#define regDIO_LINKA_CNTL 0x1f04
+#define regDIO_LINKA_CNTL_BASE_IDX 2
+#define regDIO_LINKB_CNTL 0x1f05
+#define regDIO_LINKB_CNTL_BASE_IDX 2
+#define regDIO_LINKC_CNTL 0x1f06
+#define regDIO_LINKC_CNTL_BASE_IDX 2
+#define regDIO_LINKD_CNTL 0x1f07
+#define regDIO_LINKD_CNTL_BASE_IDX 2
+#define regDIO_LINKE_CNTL 0x1f08
+#define regDIO_LINKE_CNTL_BASE_IDX 2
+#define regDIO_LINKF_CNTL 0x1f09
+#define regDIO_LINKF_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig_stream_mapper_dispdec
+// base address: 0x0
+#define regDIG0_STREAM_MAPPER_CONTROL 0x1f0d
+#define regDIG0_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG1_STREAM_MAPPER_CONTROL 0x1f0e
+#define regDIG1_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG2_STREAM_MAPPER_CONTROL 0x1f0f
+#define regDIG2_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG3_STREAM_MAPPER_CONTROL 0x1f10
+#define regDIG3_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG4_STREAM_MAPPER_CONTROL 0x1f11
+#define regDIG4_STREAM_MAPPER_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+// base address: 0x7d10
+#define regDC_PERFMON18_PERFCOUNTER_CNTL 0x1f44
+#define regDC_PERFMON18_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2 0x1f45
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_STATE 0x1f46
+#define regDC_PERFMON18_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL 0x1f47
+#define regDC_PERFMON18_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL2 0x1f48
+#define regDC_PERFMON18_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC 0x1f49
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW 0x1f4a
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_HI 0x1f4b
+#define regDC_PERFMON18_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_LOW 0x1f4c
+#define regDC_PERFMON18_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define regDC_GENERICA 0x2868
+#define regDC_GENERICA_BASE_IDX 2
+#define regDC_GENERICB 0x2869
+#define regDC_GENERICB_BASE_IDX 2
+#define regDCIO_CLOCK_CNTL 0x286a
+#define regDCIO_CLOCK_CNTL_BASE_IDX 2
+#define regDC_REF_CLK_CNTL 0x286b
+#define regDC_REF_CLK_CNTL_BASE_IDX 2
+#define regUNIPHYA_LINK_CNTL 0x286d
+#define regUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define regUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYB_LINK_CNTL 0x286f
+#define regUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define regUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYC_LINK_CNTL 0x2871
+#define regUNIPHYC_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYC_CHANNEL_XBAR_CNTL 0x2872
+#define regUNIPHYC_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYD_CHANNEL_XBAR_CNTL 0x2874
+#define regUNIPHYD_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYE_CHANNEL_XBAR_CNTL 0x2876
+#define regUNIPHYE_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regDCIO_WRCMD_DELAY 0x287e
+#define regDCIO_WRCMD_DELAY_BASE_IDX 2
+#define regDC_PINSTRAPS 0x2880
+#define regDC_PINSTRAPS_BASE_IDX 2
+#define regDCIO_SPARE 0x2882
+#define regDCIO_SPARE_BASE_IDX 2
+#define regINTERCEPT_STATE 0x2884
+#define regINTERCEPT_STATE_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_PAT 0x2886
+#define regDCIO_PATTERN_GEN_PAT_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_EN 0x2887
+#define regDCIO_PATTERN_GEN_EN_BASE_IDX 2
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL 0x288b
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL_BASE_IDX 2
+#define regDCIO_GSL_GENLK_PAD_CNTL 0x288c
+#define regDCIO_GSL_GENLK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL 0x288d
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_SOFT_RESET 0x289e
+#define regDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define regDC_GPIO_GENERIC_MASK 0x28c8
+#define regDC_GPIO_GENERIC_MASK_BASE_IDX 2
+#define regDC_GPIO_GENERIC_A 0x28c9
+#define regDC_GPIO_GENERIC_A_BASE_IDX 2
+#define regDC_GPIO_GENERIC_EN 0x28ca
+#define regDC_GPIO_GENERIC_EN_BASE_IDX 2
+#define regDC_GPIO_GENERIC_Y 0x28cb
+#define regDC_GPIO_GENERIC_Y_BASE_IDX 2
+#define regDC_GPIO_DDC1_MASK 0x28d0
+#define regDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC1_A 0x28d1
+#define regDC_GPIO_DDC1_A_BASE_IDX 2
+#define regDC_GPIO_DDC1_EN 0x28d2
+#define regDC_GPIO_DDC1_EN_BASE_IDX 2
+#define regDC_GPIO_DDC1_Y 0x28d3
+#define regDC_GPIO_DDC1_Y_BASE_IDX 2
+#define regDC_GPIO_DDC2_MASK 0x28d4
+#define regDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC2_A 0x28d5
+#define regDC_GPIO_DDC2_A_BASE_IDX 2
+#define regDC_GPIO_DDC2_EN 0x28d6
+#define regDC_GPIO_DDC2_EN_BASE_IDX 2
+#define regDC_GPIO_DDC2_Y 0x28d7
+#define regDC_GPIO_DDC2_Y_BASE_IDX 2
+#define regDC_GPIO_DDC3_MASK 0x28d8
+#define regDC_GPIO_DDC3_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC3_A 0x28d9
+#define regDC_GPIO_DDC3_A_BASE_IDX 2
+#define regDC_GPIO_DDC3_EN 0x28da
+#define regDC_GPIO_DDC3_EN_BASE_IDX 2
+#define regDC_GPIO_DDC3_Y 0x28db
+#define regDC_GPIO_DDC3_Y_BASE_IDX 2
+#define regDC_GPIO_DDC4_MASK 0x28dc
+#define regDC_GPIO_DDC4_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC4_A 0x28dd
+#define regDC_GPIO_DDC4_A_BASE_IDX 2
+#define regDC_GPIO_DDC4_EN 0x28de
+#define regDC_GPIO_DDC4_EN_BASE_IDX 2
+#define regDC_GPIO_DDC4_Y 0x28df
+#define regDC_GPIO_DDC4_Y_BASE_IDX 2
+#define regDC_GPIO_DDC5_MASK 0x28e0
+#define regDC_GPIO_DDC5_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC5_A 0x28e1
+#define regDC_GPIO_DDC5_A_BASE_IDX 2
+#define regDC_GPIO_DDC5_EN 0x28e2
+#define regDC_GPIO_DDC5_EN_BASE_IDX 2
+#define regDC_GPIO_DDC5_Y 0x28e3
+#define regDC_GPIO_DDC5_Y_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_MASK 0x28e8
+#define regDC_GPIO_DDCVGA_MASK_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_A 0x28e9
+#define regDC_GPIO_DDCVGA_A_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_EN 0x28ea
+#define regDC_GPIO_DDCVGA_EN_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_Y 0x28eb
+#define regDC_GPIO_DDCVGA_Y_BASE_IDX 2
+#define regDC_GPIO_GENLK_MASK 0x28f0
+#define regDC_GPIO_GENLK_MASK_BASE_IDX 2
+#define regDC_GPIO_GENLK_A 0x28f1
+#define regDC_GPIO_GENLK_A_BASE_IDX 2
+#define regDC_GPIO_GENLK_EN 0x28f2
+#define regDC_GPIO_GENLK_EN_BASE_IDX 2
+#define regDC_GPIO_GENLK_Y 0x28f3
+#define regDC_GPIO_GENLK_Y_BASE_IDX 2
+#define regDC_GPIO_HPD_MASK 0x28f4
+#define regDC_GPIO_HPD_MASK_BASE_IDX 2
+#define regDC_GPIO_HPD_A 0x28f5
+#define regDC_GPIO_HPD_A_BASE_IDX 2
+#define regDC_GPIO_HPD_EN 0x28f6
+#define regDC_GPIO_HPD_EN_BASE_IDX 2
+#define regDC_GPIO_HPD_Y 0x28f7
+#define regDC_GPIO_HPD_Y_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S0 0x28f8
+#define regDC_GPIO_DRIVE_STRENGTH_S0_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S1 0x28f9
+#define regDC_GPIO_DRIVE_STRENGTH_S1_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ0_EN 0x28fa
+#define regDC_GPIO_PWRSEQ0_EN_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define regDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_2 0x28fd
+#define regDC_GPIO_PAD_STRENGTH_2_BASE_IDX 2
+#define regPHY_AUX_CNTL 0x28ff
+#define regPHY_AUX_CNTL_BASE_IDX 2
+#define regDC_GPIO_DRIVE_TXIMPSEL 0x2900
+#define regDC_GPIO_DRIVE_TXIMPSEL_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ1_EN 0x2902
+#define regDC_GPIO_PWRSEQ1_EN_BASE_IDX 2
+#define regDC_GPIO_TX12_EN 0x2915
+#define regDC_GPIO_TX12_EN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_0 0x2916
+#define regDC_GPIO_AUX_CTRL_0_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_1 0x2917
+#define regDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_2 0x2918
+#define regDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define regDC_GPIO_RXEN 0x2919
+#define regDC_GPIO_RXEN_BASE_IDX 2
+#define regDC_GPIO_PULLUPEN 0x291a
+#define regDC_GPIO_PULLUPEN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_3 0x291b
+#define regDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_4 0x291c
+#define regDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_5 0x291d
+#define regDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define regAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define regAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
+// base address: 0x360
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0 0x2a00
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1 0x2a01
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2 0x2a02
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3 0x2a03
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4 0x2a04
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5 0x2a05
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6 0x2a06
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7 0x2a07
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8 0x2a08
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9 0x2a09
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10 0x2a0a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11 0x2a0b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12 0x2a0c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13 0x2a0d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14 0x2a0e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15 0x2a0f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16 0x2a10
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17 0x2a11
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18 0x2a12
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19 0x2a13
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20 0x2a14
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21 0x2a15
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22 0x2a16
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23 0x2a17
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24 0x2a18
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25 0x2a19
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26 0x2a1a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27 0x2a1b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28 0x2a1c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29 0x2a1d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30 0x2a1e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31 0x2a1f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32 0x2a20
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33 0x2a21
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34 0x2a22
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35 0x2a23
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36 0x2a24
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37 0x2a25
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38 0x2a26
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39 0x2a27
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40 0x2a28
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41 0x2a29
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42 0x2a2a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43 0x2a2b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44 0x2a2c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45 0x2a2d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46 0x2a2e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47 0x2a2f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48 0x2a30
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49 0x2a31
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50 0x2a32
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51 0x2a33
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52 0x2a34
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53 0x2a35
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54 0x2a36
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55 0x2a37
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56 0x2a38
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57 0x2a39
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
+// base address: 0x6c0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0 0x2ad8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1 0x2ad9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2 0x2ada
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3 0x2adb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4 0x2adc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5 0x2add
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6 0x2ade
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7 0x2adf
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8 0x2ae0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9 0x2ae1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10 0x2ae2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11 0x2ae3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12 0x2ae4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13 0x2ae5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14 0x2ae6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15 0x2ae7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16 0x2ae8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17 0x2ae9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18 0x2aea
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19 0x2aeb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20 0x2aec
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21 0x2aed
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22 0x2aee
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23 0x2aef
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24 0x2af0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25 0x2af1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26 0x2af2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27 0x2af3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28 0x2af4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29 0x2af5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30 0x2af6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31 0x2af7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32 0x2af8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33 0x2af9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34 0x2afa
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35 0x2afb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36 0x2afc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37 0x2afd
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38 0x2afe
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39 0x2aff
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40 0x2b00
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41 0x2b01
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42 0x2b02
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43 0x2b03
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44 0x2b04
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45 0x2b05
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46 0x2b06
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47 0x2b07
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48 0x2b08
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49 0x2b09
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50 0x2b0a
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51 0x2b0b
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52 0x2b0c
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53 0x2b0d
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54 0x2b0e
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55 0x2b0f
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56 0x2b10
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57 0x2b11
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
+// base address: 0xa20
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0 0x2bb0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1 0x2bb1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2 0x2bb2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3 0x2bb3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4 0x2bb4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5 0x2bb5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6 0x2bb6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7 0x2bb7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8 0x2bb8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9 0x2bb9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10 0x2bba
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11 0x2bbb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12 0x2bbc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13 0x2bbd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14 0x2bbe
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15 0x2bbf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16 0x2bc0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17 0x2bc1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18 0x2bc2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19 0x2bc3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20 0x2bc4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21 0x2bc5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22 0x2bc6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23 0x2bc7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24 0x2bc8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25 0x2bc9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26 0x2bca
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27 0x2bcb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28 0x2bcc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29 0x2bcd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30 0x2bce
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31 0x2bcf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32 0x2bd0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33 0x2bd1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34 0x2bd2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35 0x2bd3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36 0x2bd4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37 0x2bd5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38 0x2bd6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39 0x2bd7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40 0x2bd8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41 0x2bd9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42 0x2bda
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43 0x2bdb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44 0x2bdc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45 0x2bdd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46 0x2bde
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47 0x2bdf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48 0x2be0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49 0x2be1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50 0x2be2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51 0x2be3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52 0x2be4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53 0x2be5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54 0x2be6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55 0x2be7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56 0x2be8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57 0x2be9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy4_dispdec
+// base address: 0xd80
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0 0x2c88
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1 0x2c89
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2 0x2c8a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3 0x2c8b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4 0x2c8c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5 0x2c8d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6 0x2c8e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7 0x2c8f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8 0x2c90
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9 0x2c91
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10 0x2c92
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11 0x2c93
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12 0x2c94
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13 0x2c95
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14 0x2c96
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15 0x2c97
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16 0x2c98
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17 0x2c99
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18 0x2c9a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19 0x2c9b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20 0x2c9c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21 0x2c9d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22 0x2c9e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23 0x2c9f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24 0x2ca0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25 0x2ca1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26 0x2ca2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27 0x2ca3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28 0x2ca4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29 0x2ca5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30 0x2ca6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31 0x2ca7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32 0x2ca8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33 0x2ca9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34 0x2caa
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35 0x2cab
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36 0x2cac
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37 0x2cad
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38 0x2cae
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39 0x2caf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40 0x2cb0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41 0x2cb1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42 0x2cb2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43 0x2cb3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44 0x2cb4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45 0x2cb5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46 0x2cb6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47 0x2cb7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48 0x2cb8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49 0x2cb9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50 0x2cba
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51 0x2cbb
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52 0x2cbc
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53 0x2cbd
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54 0x2cbe
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55 0x2cbf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56 0x2cc0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57 0x2cc1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq0_dispdec_pwrseq_dispdec
+// base address: 0x0
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN 0x2f10
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL 0x2f11
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK 0x2f12
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y 0x2f13
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL 0x2f14
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE 0x2f15
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1 0x2f16
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2 0x2f17
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1 0x2f18
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL 0x2f19
+#define regPWRSEQ0_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL2 0x2f1a
+#define regPWRSEQ0_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL 0x2f1b
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK 0x2f1c
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2 0x2f1d
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ0_PWRSEQ_SPARE 0x2f21
+#define regPWRSEQ0_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq1_dispdec_pwrseq_dispdec
+// base address: 0x1b0
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN 0x2f7c
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL 0x2f7d
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK 0x2f7e
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y 0x2f7f
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL 0x2f80
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE 0x2f81
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1 0x2f82
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2 0x2f83
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1 0x2f84
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL 0x2f85
+#define regPWRSEQ1_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL2 0x2f86
+#define regPWRSEQ1_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL 0x2f87
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK 0x2f88
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2 0x2f89
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ1_PWRSEQ_SPARE 0x2f8d
+#define regPWRSEQ1_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+// base address: 0x0
+#define regDSCC0_DSCC_CONFIG0 0x300a
+#define regDSCC0_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_CONFIG1 0x300b
+#define regDSCC0_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_STATUS 0x300c
+#define regDSCC0_DSCC_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS 0x300d
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG0 0x300e
+#define regDSCC0_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG1 0x300f
+#define regDSCC0_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG2 0x3010
+#define regDSCC0_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG3 0x3011
+#define regDSCC0_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG4 0x3012
+#define regDSCC0_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG5 0x3013
+#define regDSCC0_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG6 0x3014
+#define regDSCC0_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG7 0x3015
+#define regDSCC0_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG8 0x3016
+#define regDSCC0_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG9 0x3017
+#define regDSCC0_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG10 0x3018
+#define regDSCC0_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG11 0x3019
+#define regDSCC0_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG12 0x301a
+#define regDSCC0_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG13 0x301b
+#define regDSCC0_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG14 0x301c
+#define regDSCC0_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG15 0x301d
+#define regDSCC0_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG16 0x301e
+#define regDSCC0_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG17 0x301f
+#define regDSCC0_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG18 0x3020
+#define regDSCC0_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG19 0x3021
+#define regDSCC0_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG20 0x3022
+#define regDSCC0_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG21 0x3023
+#define regDSCC0_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG22 0x3024
+#define regDSCC0_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC0_DSCC_MEM_POWER_CONTROL 0x3025
+#define regDSCC0_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3026
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3027
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3028
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3029
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER 0x302a
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER 0x302b
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR0 0x302c
+#define regDSCC0_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR1 0x302d
+#define regDSCC0_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x302e
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x302f
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3030
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3031
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3032
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3033
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3034
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+// base address: 0x0
+#define regDSCCIF0_DSCCIF_CONFIG0 0x3005
+#define regDSCCIF0_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF0_DSCCIF_CONFIG1 0x3006
+#define regDSCCIF0_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+// base address: 0x0
+#define regDSC_TOP0_DSC_TOP_CONTROL 0x3000
+#define regDSC_TOP0_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP0_DSC_DEBUG_CONTROL 0x3001
+#define regDSC_TOP0_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc140
+#define regDC_PERFMON19_PERFCOUNTER_CNTL 0x3050
+#define regDC_PERFMON19_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2 0x3051
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_STATE 0x3052
+#define regDC_PERFMON19_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL 0x3053
+#define regDC_PERFMON19_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL2 0x3054
+#define regDC_PERFMON19_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC 0x3055
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW 0x3056
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_HI 0x3057
+#define regDC_PERFMON19_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_LOW 0x3058
+#define regDC_PERFMON19_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+// base address: 0x170
+#define regDSCC1_DSCC_CONFIG0 0x3066
+#define regDSCC1_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_CONFIG1 0x3067
+#define regDSCC1_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_STATUS 0x3068
+#define regDSCC1_DSCC_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS 0x3069
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG0 0x306a
+#define regDSCC1_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG1 0x306b
+#define regDSCC1_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG2 0x306c
+#define regDSCC1_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG3 0x306d
+#define regDSCC1_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG4 0x306e
+#define regDSCC1_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG5 0x306f
+#define regDSCC1_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG6 0x3070
+#define regDSCC1_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG7 0x3071
+#define regDSCC1_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG8 0x3072
+#define regDSCC1_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG9 0x3073
+#define regDSCC1_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG10 0x3074
+#define regDSCC1_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG11 0x3075
+#define regDSCC1_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG12 0x3076
+#define regDSCC1_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG13 0x3077
+#define regDSCC1_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG14 0x3078
+#define regDSCC1_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG15 0x3079
+#define regDSCC1_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG16 0x307a
+#define regDSCC1_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG17 0x307b
+#define regDSCC1_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG18 0x307c
+#define regDSCC1_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG19 0x307d
+#define regDSCC1_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG20 0x307e
+#define regDSCC1_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG21 0x307f
+#define regDSCC1_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG22 0x3080
+#define regDSCC1_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC1_DSCC_MEM_POWER_CONTROL 0x3081
+#define regDSCC1_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3082
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3083
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3084
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3085
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER 0x3086
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER 0x3087
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR0 0x3088
+#define regDSCC1_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR1 0x3089
+#define regDSCC1_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x308a
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x308b
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x308c
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x308d
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x308e
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x308f
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3090
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+// base address: 0x170
+#define regDSCCIF1_DSCCIF_CONFIG0 0x3061
+#define regDSCCIF1_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF1_DSCCIF_CONFIG1 0x3062
+#define regDSCCIF1_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+// base address: 0x170
+#define regDSC_TOP1_DSC_TOP_CONTROL 0x305c
+#define regDSC_TOP1_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP1_DSC_DEBUG_CONTROL 0x305d
+#define regDSC_TOP1_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc2b0
+#define regDC_PERFMON20_PERFCOUNTER_CNTL 0x30ac
+#define regDC_PERFMON20_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2 0x30ad
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_STATE 0x30ae
+#define regDC_PERFMON20_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL 0x30af
+#define regDC_PERFMON20_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL2 0x30b0
+#define regDC_PERFMON20_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC 0x30b1
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW 0x30b2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_HI 0x30b3
+#define regDC_PERFMON20_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_LOW 0x30b4
+#define regDC_PERFMON20_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+// base address: 0x2e0
+#define regDSCC2_DSCC_CONFIG0 0x30c2
+#define regDSCC2_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_CONFIG1 0x30c3
+#define regDSCC2_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_STATUS 0x30c4
+#define regDSCC2_DSCC_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS 0x30c5
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG0 0x30c6
+#define regDSCC2_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG1 0x30c7
+#define regDSCC2_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG2 0x30c8
+#define regDSCC2_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG3 0x30c9
+#define regDSCC2_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG4 0x30ca
+#define regDSCC2_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG5 0x30cb
+#define regDSCC2_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG6 0x30cc
+#define regDSCC2_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG7 0x30cd
+#define regDSCC2_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG8 0x30ce
+#define regDSCC2_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG9 0x30cf
+#define regDSCC2_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG10 0x30d0
+#define regDSCC2_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG11 0x30d1
+#define regDSCC2_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG12 0x30d2
+#define regDSCC2_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG13 0x30d3
+#define regDSCC2_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG14 0x30d4
+#define regDSCC2_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG15 0x30d5
+#define regDSCC2_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG16 0x30d6
+#define regDSCC2_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG17 0x30d7
+#define regDSCC2_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG18 0x30d8
+#define regDSCC2_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG19 0x30d9
+#define regDSCC2_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG20 0x30da
+#define regDSCC2_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG21 0x30db
+#define regDSCC2_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG22 0x30dc
+#define regDSCC2_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC2_DSCC_MEM_POWER_CONTROL 0x30dd
+#define regDSCC2_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER 0x30de
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER 0x30df
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER 0x30e0
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER 0x30e1
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER 0x30e2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER 0x30e3
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR0 0x30e4
+#define regDSCC2_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR1 0x30e5
+#define regDSCC2_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x30e6
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x30e7
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x30e8
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x30e9
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x30ea
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x30eb
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x30ec
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+// base address: 0x2e0
+#define regDSCCIF2_DSCCIF_CONFIG0 0x30bd
+#define regDSCCIF2_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF2_DSCCIF_CONFIG1 0x30be
+#define regDSCCIF2_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+// base address: 0x2e0
+#define regDSC_TOP2_DSC_TOP_CONTROL 0x30b8
+#define regDSC_TOP2_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP2_DSC_DEBUG_CONTROL 0x30b9
+#define regDSC_TOP2_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc420
+#define regDC_PERFMON21_PERFCOUNTER_CNTL 0x3108
+#define regDC_PERFMON21_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2 0x3109
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_STATE 0x310a
+#define regDC_PERFMON21_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL 0x310b
+#define regDC_PERFMON21_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL2 0x310c
+#define regDC_PERFMON21_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC 0x310d
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW 0x310e
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_HI 0x310f
+#define regDC_PERFMON21_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_LOW 0x3110
+#define regDC_PERFMON21_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+// base address: 0x450
+#define regDSCC3_DSCC_CONFIG0 0x311e
+#define regDSCC3_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_CONFIG1 0x311f
+#define regDSCC3_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_STATUS 0x3120
+#define regDSCC3_DSCC_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS 0x3121
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG0 0x3122
+#define regDSCC3_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG1 0x3123
+#define regDSCC3_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG2 0x3124
+#define regDSCC3_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG3 0x3125
+#define regDSCC3_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG4 0x3126
+#define regDSCC3_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG5 0x3127
+#define regDSCC3_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG6 0x3128
+#define regDSCC3_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG7 0x3129
+#define regDSCC3_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG8 0x312a
+#define regDSCC3_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG9 0x312b
+#define regDSCC3_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG10 0x312c
+#define regDSCC3_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG11 0x312d
+#define regDSCC3_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG12 0x312e
+#define regDSCC3_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG13 0x312f
+#define regDSCC3_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG14 0x3130
+#define regDSCC3_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG15 0x3131
+#define regDSCC3_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG16 0x3132
+#define regDSCC3_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG17 0x3133
+#define regDSCC3_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG18 0x3134
+#define regDSCC3_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG19 0x3135
+#define regDSCC3_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG20 0x3136
+#define regDSCC3_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG21 0x3137
+#define regDSCC3_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG22 0x3138
+#define regDSCC3_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC3_DSCC_MEM_POWER_CONTROL 0x3139
+#define regDSCC3_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER 0x313a
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER 0x313b
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER 0x313c
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER 0x313d
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER 0x313e
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER 0x313f
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR0 0x3140
+#define regDSCC3_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR1 0x3141
+#define regDSCC3_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x3142
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x3143
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3144
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3145
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3146
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3147
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3148
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+// base address: 0x450
+#define regDSCCIF3_DSCCIF_CONFIG0 0x3119
+#define regDSCCIF3_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF3_DSCCIF_CONFIG1 0x311a
+#define regDSCCIF3_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+// base address: 0x450
+#define regDSC_TOP3_DSC_TOP_CONTROL 0x3114
+#define regDSC_TOP3_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP3_DSC_DEBUG_CONTROL 0x3115
+#define regDSC_TOP3_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc590
+#define regDC_PERFMON22_PERFCOUNTER_CNTL 0x3164
+#define regDC_PERFMON22_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2 0x3165
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_STATE 0x3166
+#define regDC_PERFMON22_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL 0x3167
+#define regDC_PERFMON22_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL2 0x3168
+#define regDC_PERFMON22_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC 0x3169
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW 0x316a
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_HI 0x316b
+#define regDC_PERFMON22_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_LOW 0x316c
+#define regDC_PERFMON22_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+// base address: 0x2790c
+#define regHPO_TOP_CLOCK_CONTROL 0x0e43
+#define regHPO_TOP_CLOCK_CONTROL_BASE_IDX 3
+#define regHPO_TOP_HW_CONTROL 0x0e4a
+#define regHPO_TOP_HW_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_dp_stream_mapper_dispdec
+// base address: 0x27958
+#define regDP_STREAM_MAPPER_CONTROL0 0x0e56
+#define regDP_STREAM_MAPPER_CONTROL0_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL1 0x0e57
+#define regDP_STREAM_MAPPER_CONTROL1_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL2 0x0e58
+#define regDP_STREAM_MAPPER_CONTROL2_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL3 0x0e59
+#define regDP_STREAM_MAPPER_CONTROL3_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a698
+#define regDC_PERFMON23_PERFCOUNTER_CNTL 0x0e66
+#define regDC_PERFMON23_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2 0x0e67
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_STATE 0x0e68
+#define regDC_PERFMON23_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL 0x0e69
+#define regDC_PERFMON23_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL2 0x0e6a
+#define regDC_PERFMON23_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC 0x0e6b
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW 0x0e6c
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_HI 0x0e6d
+#define regDC_PERFMON23_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_LOW 0x0e6e
+#define regDC_PERFMON23_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_link_enc0_dispdec
+// base address: 0x2656c
+#define regHDMI_LINK_ENC_CONTROL 0x095b
+#define regHDMI_LINK_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_LINK_ENC_CLK_CTRL 0x095c
+#define regHDMI_LINK_ENC_CLK_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_frl_enc0_dispdec
+// base address: 0x26594
+#define regHDMI_FRL_ENC_CONFIG 0x0965
+#define regHDMI_FRL_ENC_CONFIG_BASE_IDX 3
+#define regHDMI_FRL_ENC_CONFIG2 0x0966
+#define regHDMI_FRL_ENC_CONFIG2_BASE_IDX 3
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS 0x0967
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS_BASE_IDX 3
+#define regHDMI_FRL_ENC_MEM_CTRL 0x0968
+#define regHDMI_FRL_ENC_MEM_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dispdec
+// base address: 0x2634c
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL 0x08d3
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL 0x08d5
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x08d6
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x08d7
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2 0x08d8
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_afmt_afmt_dispdec
+// base address: 0x2646c
+#define regAFMT5_AFMT_ACP 0x091b
+#define regAFMT5_AFMT_ACP_BASE_IDX 3
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL 0x091c
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2 0x091d
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO0 0x091e
+#define regAFMT5_AFMT_AUDIO_INFO0_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO1 0x091f
+#define regAFMT5_AFMT_AUDIO_INFO1_BASE_IDX 3
+#define regAFMT5_AFMT_60958_0 0x0920
+#define regAFMT5_AFMT_60958_0_BASE_IDX 3
+#define regAFMT5_AFMT_60958_1 0x0921
+#define regAFMT5_AFMT_60958_1_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL 0x0922
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL0 0x0923
+#define regAFMT5_AFMT_RAMP_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL1 0x0924
+#define regAFMT5_AFMT_RAMP_CONTROL1_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL2 0x0925
+#define regAFMT5_AFMT_RAMP_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL3 0x0926
+#define regAFMT5_AFMT_RAMP_CONTROL3_BASE_IDX 3
+#define regAFMT5_AFMT_60958_2 0x0927
+#define regAFMT5_AFMT_60958_2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT 0x0928
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT_BASE_IDX 3
+#define regAFMT5_AFMT_STATUS 0x0929
+#define regAFMT5_AFMT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL 0x092a
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0 0x092b
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_INTERRUPT_STATUS 0x092c
+#define regAFMT5_AFMT_INTERRUPT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL 0x092d
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_MEM_PWR 0x092f
+#define regAFMT5_AFMT_MEM_PWR_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dme_dme_dispdec
+// base address: 0x264f0
+#define regDME5_DME_CONTROL 0x093c
+#define regDME5_DME_CONTROL_BASE_IDX 3
+#define regDME5_DME_MEMORY_CONTROL 0x093d
+#define regDME5_DME_MEMORY_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_vpg_vpg_dispdec
+// base address: 0x264c4
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL 0x0931
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_PACKET_DATA 0x0932
+#define regVPG5_VPG_GENERIC_PACKET_DATA_BASE_IDX 3
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL 0x0933
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x0934
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_STATUS 0x0935
+#define regVPG5_VPG_GENERIC_STATUS_BASE_IDX 3
+#define regVPG5_VPG_MEM_PWR 0x0936
+#define regVPG5_VPG_MEM_PWR_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL 0x0937
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_DATA 0x0938
+#define regVPG5_VPG_ISRC1_2_DATA_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO0 0x0939
+#define regVPG5_VPG_MPEG_INFO0_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO1 0x093a
+#define regVPG5_VPG_MPEG_INFO1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_tb_enc0_dispdec
+// base address: 0x2637c
+#define regHDMI_TB_ENC_CONTROL 0x08df
+#define regHDMI_TB_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_PIXEL_FORMAT 0x08e0
+#define regHDMI_TB_ENC_PIXEL_FORMAT_BASE_IDX 3
+#define regHDMI_TB_ENC_PACKET_CONTROL 0x08e1
+#define regHDMI_TB_ENC_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL 0x08e2
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1 0x08e3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2 0x08e4
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GC_CONTROL 0x08e5
+#define regHDMI_TB_ENC_GC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0 0x08e6
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1 0x08e7
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2 0x08e8
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE 0x08e9
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE 0x08ea
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE 0x08eb
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE 0x08ec
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE 0x08ed
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE 0x08ee
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE 0x08ef
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE 0x08f0
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_DB_CONTROL 0x08f1
+#define regHDMI_TB_ENC_DB_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_0 0x08f2
+#define regHDMI_TB_ENC_ACR_32_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_1 0x08f3
+#define regHDMI_TB_ENC_ACR_32_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_0 0x08f4
+#define regHDMI_TB_ENC_ACR_44_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_1 0x08f5
+#define regHDMI_TB_ENC_ACR_44_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_0 0x08f6
+#define regHDMI_TB_ENC_ACR_48_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_1 0x08f7
+#define regHDMI_TB_ENC_ACR_48_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_0 0x08f8
+#define regHDMI_TB_ENC_ACR_STATUS_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_1 0x08f9
+#define regHDMI_TB_ENC_ACR_STATUS_1_BASE_IDX 3
+#define regHDMI_TB_ENC_BUFFER_CONTROL 0x08fb
+#define regHDMI_TB_ENC_BUFFER_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MEM_CTRL 0x08fe
+#define regHDMI_TB_ENC_MEM_CTRL_BASE_IDX 3
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL 0x08ff
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK 0x0900
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK 0x0901
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_CNTL 0x0903
+#define regHDMI_TB_ENC_CRC_CNTL_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_0 0x0904
+#define regHDMI_TB_ENC_CRC_RESULT_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL 0x0907
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MODE 0x0908
+#define regHDMI_TB_ENC_MODE_BASE_IDX 3
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS 0x0909
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_1 0x090a
+#define regHDMI_TB_ENC_CRC_RESULT_1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dispdec
+// base address: 0x1ab8c
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL 0x3623
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x3624
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL 0x3625
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x3626
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x3627
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE 0x3628
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_apg_apg_dispdec
+// base address: 0x1abc0
+#define regAPG0_APG_CONTROL 0x3630
+#define regAPG0_APG_CONTROL_BASE_IDX 2
+#define regAPG0_APG_CONTROL2 0x3631
+#define regAPG0_APG_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_DBG_GEN_CONTROL 0x3632
+#define regAPG0_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG0_APG_PACKET_CONTROL 0x3633
+#define regAPG0_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL 0x363a
+#define regAPG0_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL2 0x363b
+#define regAPG0_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_RESULT 0x363c
+#define regAPG0_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG0_APG_STATUS 0x3641
+#define regAPG0_APG_STATUS_BASE_IDX 2
+#define regAPG0_APG_STATUS2 0x3642
+#define regAPG0_APG_STATUS2_BASE_IDX 2
+#define regAPG0_APG_MEM_PWR 0x3644
+#define regAPG0_APG_MEM_PWR_BASE_IDX 2
+#define regAPG0_APG_SPARE 0x3646
+#define regAPG0_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dme_dme_dispdec
+// base address: 0x1ac38
+#define regDME6_DME_CONTROL 0x364e
+#define regDME6_DME_CONTROL_BASE_IDX 2
+#define regDME6_DME_MEMORY_CONTROL 0x364f
+#define regDME6_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_vpg_vpg_dispdec
+// base address: 0x1ac44
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3651
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_PACKET_DATA 0x3652
+#define regVPG6_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL 0x3653
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3654
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_STATUS 0x3655
+#define regVPG6_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG6_VPG_MEM_PWR 0x3656
+#define regVPG6_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL 0x3657
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_DATA 0x3658
+#define regVPG6_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO0 0x3659
+#define regVPG6_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO1 0x365a
+#define regVPG6_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc0_dispdec
+// base address: 0x1ac74
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL 0x365d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL 0x365e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x365f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3660
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3661
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0 0x3662
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1 0x3663
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2 0x3664
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3 0x3665
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4 0x3666
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5 0x3667
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6 0x3668
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7 0x3669
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8 0x366a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL 0x366b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x366c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x366d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x366e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x366f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3670
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3671
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3672
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3673
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3674
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3675
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x3676
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x3677
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3678
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3679
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x367a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL 0x367b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x367c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x367d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x367e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL 0x3683
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL 0x3684
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3685
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3686
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL 0x3687
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0 0x3688
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1 0x3689
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS 0x368a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x368b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x368c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL 0x368d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE 0x368e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2 0x3693
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3 0x3694
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dispdec
+// base address: 0x1aedc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL 0x36f7
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x36f8
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL 0x36f9
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x36fa
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x36fb
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE 0x36fc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_apg_apg_dispdec
+// base address: 0x1af10
+#define regAPG1_APG_CONTROL 0x3704
+#define regAPG1_APG_CONTROL_BASE_IDX 2
+#define regAPG1_APG_CONTROL2 0x3705
+#define regAPG1_APG_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_DBG_GEN_CONTROL 0x3706
+#define regAPG1_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG1_APG_PACKET_CONTROL 0x3707
+#define regAPG1_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL 0x370e
+#define regAPG1_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL2 0x370f
+#define regAPG1_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_RESULT 0x3710
+#define regAPG1_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG1_APG_STATUS 0x3715
+#define regAPG1_APG_STATUS_BASE_IDX 2
+#define regAPG1_APG_STATUS2 0x3716
+#define regAPG1_APG_STATUS2_BASE_IDX 2
+#define regAPG1_APG_MEM_PWR 0x3718
+#define regAPG1_APG_MEM_PWR_BASE_IDX 2
+#define regAPG1_APG_SPARE 0x371a
+#define regAPG1_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dme_dme_dispdec
+// base address: 0x1af88
+#define regDME7_DME_CONTROL 0x3722
+#define regDME7_DME_CONTROL_BASE_IDX 2
+#define regDME7_DME_MEMORY_CONTROL 0x3723
+#define regDME7_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_vpg_vpg_dispdec
+// base address: 0x1af94
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3725
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_PACKET_DATA 0x3726
+#define regVPG7_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL 0x3727
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3728
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_STATUS 0x3729
+#define regVPG7_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG7_VPG_MEM_PWR 0x372a
+#define regVPG7_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL 0x372b
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_DATA 0x372c
+#define regVPG7_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO0 0x372d
+#define regVPG7_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO1 0x372e
+#define regVPG7_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc1_dispdec
+// base address: 0x1afc4
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL 0x3731
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3732
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3733
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3734
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3735
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0 0x3736
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1 0x3737
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2 0x3738
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3 0x3739
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4 0x373a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5 0x373b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6 0x373c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7 0x373d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8 0x373e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL 0x373f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3740
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3741
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3742
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3743
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3744
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3745
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3746
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3747
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3748
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3749
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x374a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x374b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x374c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x374d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x374e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL 0x374f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3750
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3751
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3752
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL 0x3757
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL 0x3758
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3759
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x375a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL 0x375b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0 0x375c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1 0x375d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS 0x375e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x375f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3760
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3761
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE 0x3762
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2 0x3767
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3 0x3768
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dispdec
+// base address: 0x1b22c
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL 0x37cb
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x37cc
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL 0x37cd
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x37ce
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x37cf
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE 0x37d0
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_apg_apg_dispdec
+// base address: 0x1b260
+#define regAPG2_APG_CONTROL 0x37d8
+#define regAPG2_APG_CONTROL_BASE_IDX 2
+#define regAPG2_APG_CONTROL2 0x37d9
+#define regAPG2_APG_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_DBG_GEN_CONTROL 0x37da
+#define regAPG2_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG2_APG_PACKET_CONTROL 0x37db
+#define regAPG2_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL 0x37e2
+#define regAPG2_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL2 0x37e3
+#define regAPG2_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_RESULT 0x37e4
+#define regAPG2_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG2_APG_STATUS 0x37e9
+#define regAPG2_APG_STATUS_BASE_IDX 2
+#define regAPG2_APG_STATUS2 0x37ea
+#define regAPG2_APG_STATUS2_BASE_IDX 2
+#define regAPG2_APG_MEM_PWR 0x37ec
+#define regAPG2_APG_MEM_PWR_BASE_IDX 2
+#define regAPG2_APG_SPARE 0x37ee
+#define regAPG2_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dme_dme_dispdec
+// base address: 0x1b2d8
+#define regDME8_DME_CONTROL 0x37f6
+#define regDME8_DME_CONTROL_BASE_IDX 2
+#define regDME8_DME_MEMORY_CONTROL 0x37f7
+#define regDME8_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_vpg_vpg_dispdec
+// base address: 0x1b2e4
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL 0x37f9
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_PACKET_DATA 0x37fa
+#define regVPG8_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL 0x37fb
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x37fc
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_STATUS 0x37fd
+#define regVPG8_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG8_VPG_MEM_PWR 0x37fe
+#define regVPG8_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL 0x37ff
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_DATA 0x3800
+#define regVPG8_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO0 0x3801
+#define regVPG8_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO1 0x3802
+#define regVPG8_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc2_dispdec
+// base address: 0x1b314
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL 0x3805
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3806
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3807
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3808
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3809
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0 0x380a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1 0x380b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2 0x380c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3 0x380d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4 0x380e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5 0x380f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6 0x3810
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7 0x3811
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8 0x3812
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL 0x3813
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3814
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3815
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3816
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3817
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3818
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3819
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x381a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x381b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x381c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x381d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x381e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x381f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3820
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3821
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x3822
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL 0x3823
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3824
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3825
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3826
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL 0x382b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL 0x382c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL 0x382d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x382e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL 0x382f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0 0x3830
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1 0x3831
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS 0x3832
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3833
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3834
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3835
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE 0x3836
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2 0x383b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3 0x383c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dispdec
+// base address: 0x1b57c
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL 0x389f
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x38a0
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL 0x38a1
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x38a2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x38a3
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE 0x38a4
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_apg_apg_dispdec
+// base address: 0x1b5b0
+#define regAPG3_APG_CONTROL 0x38ac
+#define regAPG3_APG_CONTROL_BASE_IDX 2
+#define regAPG3_APG_CONTROL2 0x38ad
+#define regAPG3_APG_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_DBG_GEN_CONTROL 0x38ae
+#define regAPG3_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG3_APG_PACKET_CONTROL 0x38af
+#define regAPG3_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL 0x38b6
+#define regAPG3_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL2 0x38b7
+#define regAPG3_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_RESULT 0x38b8
+#define regAPG3_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG3_APG_STATUS 0x38bd
+#define regAPG3_APG_STATUS_BASE_IDX 2
+#define regAPG3_APG_STATUS2 0x38be
+#define regAPG3_APG_STATUS2_BASE_IDX 2
+#define regAPG3_APG_MEM_PWR 0x38c0
+#define regAPG3_APG_MEM_PWR_BASE_IDX 2
+#define regAPG3_APG_SPARE 0x38c2
+#define regAPG3_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dme_dme_dispdec
+// base address: 0x1b628
+#define regDME9_DME_CONTROL 0x38ca
+#define regDME9_DME_CONTROL_BASE_IDX 2
+#define regDME9_DME_MEMORY_CONTROL 0x38cb
+#define regDME9_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_vpg_vpg_dispdec
+// base address: 0x1b634
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL 0x38cd
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_PACKET_DATA 0x38ce
+#define regVPG9_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL 0x38cf
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x38d0
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_STATUS 0x38d1
+#define regVPG9_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG9_VPG_MEM_PWR 0x38d2
+#define regVPG9_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL 0x38d3
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_DATA 0x38d4
+#define regVPG9_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO0 0x38d5
+#define regVPG9_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO1 0x38d6
+#define regVPG9_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc3_dispdec
+// base address: 0x1b664
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL 0x38d9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL 0x38da
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x38db
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x38dc
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x38dd
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0 0x38de
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1 0x38df
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2 0x38e0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3 0x38e1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4 0x38e2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5 0x38e3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6 0x38e4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7 0x38e5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8 0x38e6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL 0x38e7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x38e8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x38e9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x38ea
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x38eb
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x38ec
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x38ed
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x38ee
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x38ef
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x38f0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x38f1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x38f2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x38f3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x38f4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x38f5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x38f6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL 0x38f7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x38f8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x38f9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x38fa
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL 0x38ff
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL 0x3900
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3901
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3902
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL 0x3903
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0 0x3904
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1 0x3905
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS 0x3906
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3907
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3908
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3909
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE 0x390a
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2 0x390f
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3 0x3910
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc0_dispdec
+// base address: 0x1ad5c
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL 0x3697
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE 0x3698
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym320_dispdec
+// base address: 0x1ae00
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL 0x36c0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS 0x36c1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE 0x36c4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0 0x36c5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1 0x36c6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2 0x36c7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3 0x36c8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0 0x36cb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1 0x36cc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2 0x36cd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3 0x36ce
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0 0x36d1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1 0x36d2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2 0x36d3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3 0x36d4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG 0x36d7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0 0x36d8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1 0x36d9
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2 0x36da
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3 0x36db
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE 0x36dc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0 0x36dd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1 0x36de
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2 0x36df
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3 0x36e0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4 0x36e1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5 0x36e2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6 0x36e3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7 0x36e4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8 0x36e5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9 0x36e6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10 0x36e7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS 0x36e8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x36ea
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x36eb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x36ec
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x36ed
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0 0x36ee
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1 0x36ef
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS 0x36f0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT 0x36f1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc1_dispdec
+// base address: 0x1b0ac
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL 0x376b
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE 0x376c
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym321_dispdec
+// base address: 0x1b150
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL 0x3794
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS 0x3795
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE 0x3798
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0 0x3799
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1 0x379a
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2 0x379b
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3 0x379c
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0 0x379f
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1 0x37a0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2 0x37a1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3 0x37a2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0 0x37a5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1 0x37a6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2 0x37a7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3 0x37a8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG 0x37ab
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0 0x37ac
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1 0x37ad
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2 0x37ae
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3 0x37af
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE 0x37b0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0 0x37b1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1 0x37b2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2 0x37b3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3 0x37b4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4 0x37b5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5 0x37b6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6 0x37b7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7 0x37b8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8 0x37b9
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9 0x37ba
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10 0x37bb
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS 0x37bc
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x37be
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x37bf
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x37c0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x37c1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0 0x37c2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1 0x37c3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS 0x37c4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT 0x37c5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+// base address: 0x0
+#define regDCHVM_CTRL0 0x3603
+#define regDCHVM_CTRL0_BASE_IDX 2
+#define regDCHVM_CTRL1 0x3604
+#define regDCHVM_CTRL1_BASE_IDX 2
+#define regDCHVM_CLK_CTRL 0x3605
+#define regDCHVM_CLK_CTRL_BASE_IDX 2
+#define regDCHVM_MEM_CTRL 0x3606
+#define regDCHVM_MEM_CTRL_BASE_IDX 2
+#define regDCHVM_RIOMMU_CTRL0 0x3607
+#define regDCHVM_RIOMMU_CTRL0_BASE_IDX 2
+#define regDCHVM_RIOMMU_STAT0 0x3608
+#define regDCHVM_RIOMMU_STAT0_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dlpc_dlpc_dispdec
+// base address: 0x0
+#define regDLPC_ENABLE 0x2fe8
+#define regDLPC_ENABLE_BASE_IDX 2
+#define regDLPC_CURRENT_COUNT 0x2fe9
+#define regDLPC_CURRENT_COUNT_BASE_IDX 2
+#define regDLPC_OPTC_SNAPSHOT 0x2fea
+#define regDLPC_OPTC_SNAPSHOT_BASE_IDX 2
+#define regDLPC_PWRUP 0x2feb
+#define regDLPC_PWRUP_BASE_IDX 2
+#define regDLPC_OTG_RESYNC 0x2fec
+#define regDLPC_OTG_RESYNC_BASE_IDX 2
+#define regDLPC_DCN_ZSC_LONO_PWRUP 0x2fed
+#define regDLPC_DCN_ZSC_LONO_PWRUP_BASE_IDX 2
+#define regDLPC_SPARE 0x2fee
+#define regDLPC_SPARE_BASE_IDX 2
+#define regDLPC_COUNTER_INIT_VALUE 0x2fef
+#define regDLPC_COUNTER_INIT_VALUE_BASE_IDX 2
+
+
+// addressBlock: dce_dpia_dpia_mu0_dpiadec
+// base address: 0x72000
+#define regDPIA_MU_CLOCK_CTRL 0x13800
+#define regDPIA_MU_CLOCK_CTRL_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0 0x13801
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0 0x13802
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1 0x13803
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1 0x13804
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2 0x13805
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2 0x13806
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3 0x13807
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3 0x13808
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0 0x13811
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1 0x13812
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2 0x13813
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3 0x13814
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT 0x13819
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_STATUS 0x1381a
+#define regDPIA_MU_INTERRUPT_STATUS_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_CTRL 0x1381b
+#define regDPIA_MU_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL 0x1381c
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK 0x1381d
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL 0x1381e
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2 0x1381f
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_STATUS 0x13820
+#define regDPIA_MU_RBBMIF_STATUS_BASE_IDX 3
+#define regDPIA_MU_MICROSECOND_REF_CTRL 0x13821
+#define regDPIA_MU_MICROSECOND_REF_CTRL_BASE_IDX 3
+#define regDPIA_MU_PORT_ADP_STATUS 0x13822
+#define regDPIA_MU_PORT_ADP_STATUS_BASE_IDX 3
+#define regDPIA_GLUE_CTRL 0x13823
+#define regDPIA_GLUE_CTRL_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL0 0x13825
+#define regDPIA_PERF_COUNT_CONTROL0_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL1 0x13826
+#define regDPIA_PERF_COUNT_CONTROL1_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL2 0x13827
+#define regDPIA_PERF_COUNT_CONTROL2_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL3 0x13828
+#define regDPIA_PERF_COUNT_CONTROL3_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL4 0x13829
+#define regDPIA_PERF_COUNT_CONTROL4_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL5 0x1382a
+#define regDPIA_PERF_COUNT_CONTROL5_BASE_IDX 3
+#define regDPIA_PERF_COUNT_INDEX 0x1382b
+#define regDPIA_PERF_COUNT_INDEX_BASE_IDX 3
+#define regDPIA_PERF_COUNT_DATA_LO 0x1382c
+#define regDPIA_PERF_COUNT_DATA_LO_BASE_IDX 3
+#define regDPIA_MU_SPARE 0x1382d
+#define regDPIA_MU_SPARE_BASE_IDX 3
+
+
+// addressBlock: azendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x2771
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2f0b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB 0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE 0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x379e
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3f0e
+
+
+// addressBlock: azendpoint_descriptorind
+// base address: 0x0
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000a
+#define ixAUDIO_DESCRIPTOR10 0x000b
+#define ixAUDIO_DESCRIPTOR11 0x000c
+#define ixAUDIO_DESCRIPTOR12 0x000d
+#define ixAUDIO_DESCRIPTOR13 0x000e
+
+
+// addressBlock: azendpoint_sinkinfoind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000a
+#define ixSINK_DESCRIPTION6 0x000b
+#define ixSINK_DESCRIPTION7 0x000c
+#define ixSINK_DESCRIPTION8 0x000d
+#define ixSINK_DESCRIPTION9 0x000e
+#define ixSINK_DESCRIPTION10 0x000f
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc0resultind
+// base address: 0x0
+#define ixAZALIA_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc1resultind
+// base address: 0x0
+#define ixAZALIA_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azinputendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x670d
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE 0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE 0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE 0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE 0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE 0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB 0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x779a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L 0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H 0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x7f0c
+
+
+// addressBlock: azroot_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0f00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0f02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0f04
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1f0f
+
+
+// addressBlock: azf0stream0_streamind
+// base address: 0x0
+#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream1_streamind
+// base address: 0x0
+#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream2_streamind
+// base address: 0x0
+#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream3_streamind
+// base address: 0x0
+#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream4_streamind
+// base address: 0x0
+#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream5_streamind
+// base address: 0x0
+#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream6_streamind
+// base address: 0x0
+#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream7_streamind
+// base address: 0x0
+#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream8_streamind
+// base address: 0x0
+#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream9_streamind
+// base address: 0x0
+#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream10_streamind
+// base address: 0x0
+#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream11_streamind
+// base address: 0x0
+#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream12_streamind
+// base address: 0x0
+#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream13_streamind
+// base address: 0x0
+#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream14_streamind
+// base address: 0x0
+#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream15_streamind
+// base address: 0x0
+#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint2_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint3_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint4_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint5_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint6_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint7_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint3_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h
new file mode 100644
index 000000000000..3ffb6ec8b40a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h
@@ -0,0 +1,61940 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+
+#ifndef _dcn_3_6_0_SH_MASK_HEADER
+#define _dcn_3_6_0_SH_MASK_HEADER
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+//GLOBAL_CAPABILITIES
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED__SHIFT 0x3
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED__SHIFT 0x8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED__SHIFT 0xc
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED_MASK 0x0001L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x0006L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED_MASK 0x00F8L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED_MASK 0x0F00L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED_MASK 0xF000L
+//MINOR_VERSION
+#define MINOR_VERSION__MINOR_VERSION__SHIFT 0x0
+#define MINOR_VERSION__MINOR_VERSION_MASK 0xFFL
+//MAJOR_VERSION
+#define MAJOR_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define MAJOR_VERSION__MAJOR_VERSION_MASK 0xFFL
+//OUTPUT_PAYLOAD_CAPABILITY
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+//INPUT_PAYLOAD_CAPABILITY
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+//GLOBAL_CONTROL
+#define GLOBAL_CONTROL__CONTROLLER_RESET__SHIFT 0x0
+#define GLOBAL_CONTROL__FLUSH_CONTROL__SHIFT 0x1
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE__SHIFT 0x8
+#define GLOBAL_CONTROL__CONTROLLER_RESET_MASK 0x00000001L
+#define GLOBAL_CONTROL__FLUSH_CONTROL_MASK 0x00000002L
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE_MASK 0x00000100L
+//WAKE_ENABLE
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG__SHIFT 0x0
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG_MASK 0x0001L
+//STATE_CHANGE_STATUS
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS__SHIFT 0x0
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS_MASK 0x0001L
+//GLOBAL_STATUS
+#define GLOBAL_STATUS__FLUSH_STATUS__SHIFT 0x1
+#define GLOBAL_STATUS__FLUSH_STATUS_MASK 0x00000002L
+//OUTPUT_STREAM_PAYLOAD_CAPABILITY
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x0
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFFL
+//INPUT_STREAM_PAYLOAD_CAPABILITY
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x0
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFFL
+//INTERRUPT_CONTROL
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE__SHIFT 0x1
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE__SHIFT 0x2
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE__SHIFT 0x3
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE__SHIFT 0x4
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE__SHIFT 0x5
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE__SHIFT 0x6
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE__SHIFT 0x7
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE__SHIFT 0x8
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE__SHIFT 0x9
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE__SHIFT 0xa
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE__SHIFT 0xb
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE__SHIFT 0xc
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE__SHIFT 0xd
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE__SHIFT 0xe
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE__SHIFT 0xf
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE__SHIFT 0x1e
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE__SHIFT 0x1f
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE_MASK 0x00000001L
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE_MASK 0x00000002L
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE_MASK 0x00000004L
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE_MASK 0x00000008L
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE_MASK 0x00000010L
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE_MASK 0x00000020L
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE_MASK 0x00000040L
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE_MASK 0x00000080L
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE_MASK 0x00000100L
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE_MASK 0x00000200L
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE_MASK 0x00000400L
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE_MASK 0x00000800L
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE_MASK 0x00001000L
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE_MASK 0x00002000L
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE_MASK 0x00004000L
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE_MASK 0x00008000L
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE_MASK 0x40000000L
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE_MASK 0x80000000L
+//INTERRUPT_STATUS
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS__SHIFT 0x0
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS__SHIFT 0x1
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS__SHIFT 0x2
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS__SHIFT 0x3
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS__SHIFT 0x4
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS__SHIFT 0x5
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS__SHIFT 0x6
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS__SHIFT 0x7
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS__SHIFT 0x8
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS__SHIFT 0x9
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS__SHIFT 0xa
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS__SHIFT 0xb
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS__SHIFT 0xc
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS__SHIFT 0xd
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS__SHIFT 0xe
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS__SHIFT 0xf
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS__SHIFT 0x1e
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS__SHIFT 0x1f
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS_MASK 0x00000001L
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS_MASK 0x00000002L
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS_MASK 0x00000004L
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS_MASK 0x00000008L
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS_MASK 0x00000010L
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS_MASK 0x00000020L
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS_MASK 0x00000040L
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS_MASK 0x00000080L
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS_MASK 0x00000100L
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS_MASK 0x00000200L
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS_MASK 0x00000400L
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS_MASK 0x00000800L
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS_MASK 0x00001000L
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS_MASK 0x00002000L
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS_MASK 0x00004000L
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS_MASK 0x00008000L
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS_MASK 0x40000000L
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK 0x80000000L
+//WALL_CLOCK_COUNTER
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER__SHIFT 0x0
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER_MASK 0xFFFFFFFFL
+//STREAM_SYNCHRONIZATION
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION__SHIFT 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION__SHIFT 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION__SHIFT 0x3
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION__SHIFT 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION__SHIFT 0x5
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION__SHIFT 0x6
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION__SHIFT 0x7
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION__SHIFT 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION__SHIFT 0x9
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION__SHIFT 0xa
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION__SHIFT 0xb
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION__SHIFT 0xc
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION__SHIFT 0xd
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION__SHIFT 0xe
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION__SHIFT 0xf
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION_MASK 0x00000001L
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION_MASK 0x00000002L
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION_MASK 0x00000004L
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION_MASK 0x00000008L
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION_MASK 0x00000010L
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION_MASK 0x00000020L
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION_MASK 0x00000040L
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION_MASK 0x00000080L
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION_MASK 0x00000100L
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION_MASK 0x00000200L
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION_MASK 0x00000400L
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION_MASK 0x00000800L
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION_MASK 0x00001000L
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION_MASK 0x00002000L
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION_MASK 0x00004000L
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION_MASK 0x00008000L
+//CORB_LOWER_BASE_ADDRESS
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//CORB_UPPER_BASE_ADDRESS
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_CORB_WRITE_POINTER
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+//AZCONTROLLER0_CORB_READ_POINTER
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER0_CORB_CONTROL
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+//AZCONTROLLER0_CORB_STATUS
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+//AZCONTROLLER0_CORB_SIZE
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_RIRB_WRITE_POINTER
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+//AZCONTROLLER0_RIRB_CONTROL
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+//AZCONTROLLER0_RIRB_STATUS
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+//AZCONTROLLER0_RIRB_SIZE
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+//AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+//AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+//AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+//AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+//AZCONTROLLER1_CORB_WRITE_POINTER
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+//AZCONTROLLER1_CORB_READ_POINTER
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER1_CORB_CONTROL
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+//AZCONTROLLER1_CORB_STATUS
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+//AZCONTROLLER1_CORB_SIZE
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_RIRB_WRITE_POINTER
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+//AZCONTROLLER1_RIRB_CONTROL
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+//AZCONTROLLER1_RIRB_STATUS
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+//AZCONTROLLER1_RIRB_SIZE
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+//AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+//AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+//AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+//AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+//DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+//PHYPLLA_PIXCLK_RESYNC_CNTL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLB_PIXCLK_RESYNC_CNTL
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLC_PIXCLK_RESYNC_CNTL
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLD_PIXCLK_RESYNC_CNTL
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x00000100L
+//DP_DTO_DBUF_EN
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN__SHIFT 0x2
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN__SHIFT 0x3
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN__SHIFT 0x4
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN__SHIFT 0x5
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN__SHIFT 0x6
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN__SHIFT 0x7
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN_MASK 0x00000004L
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN_MASK 0x00000008L
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN_MASK 0x00000010L
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN_MASK 0x00000020L
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN_MASK 0x00000040L
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN_MASK 0x00000080L
+//DSCCLK3_DTO_PARAM
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE__SHIFT 0x0
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO__SHIFT 0x10
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DPREFCLK_CGTT_BLK_CTRL_REG
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_GATE_DISABLE_CNTL4
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L
+//DPSTREAMCLK_CNTL
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN__SHIFT 0x7
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL__SHIFT 0x8
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN__SHIFT 0xb
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL__SHIFT 0xc
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN__SHIFT 0xf
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN_MASK 0x00000008L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL_MASK 0x00000070L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN_MASK 0x00000080L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL_MASK 0x00000700L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN_MASK 0x00000800L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL_MASK 0x00007000L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN_MASK 0x00008000L
+//REFCLK_CGTT_BLK_CTRL_REG
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//PHYPLLE_PIXCLK_RESYNC_CNTL
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x00000100L
+//DCCG_PERFMON_CNTL2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE__SHIFT 0x9
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x00000100L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE_MASK 0x00000200L
+//DCCG_GLOBAL_FGCG_REP_CNTL
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS__SHIFT 0x0
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS_MASK 0x00000001L
+//DCCG_DS_DTO_INCR
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_DS_DTO_MODULO
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_DS_CNTL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+//DCCG_DS_HW_CAL_INTERVAL
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+//DPREFCLK_CNTL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+//DCE_VERSION
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+//DCCG_GTC_CNTL
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+//DCCG_GTC_DTO_INCR
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_GTC_DTO_MODULO
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_GTC_CURRENT
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+//SYMCLK32_SE_CNTL
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN__SHIFT 0x3
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN__SHIFT 0x7
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL__SHIFT 0x8
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN__SHIFT 0xb
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL__SHIFT 0xc
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN__SHIFT 0xf
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN_MASK 0x00000008L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN_MASK 0x00000080L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL_MASK 0x00000700L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN_MASK 0x00000800L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL_MASK 0x00007000L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN_MASK 0x00008000L
+//SYMCLK32_LE_CNTL
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN__SHIFT 0x3
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN__SHIFT 0x7
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN_MASK 0x00000008L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN_MASK 0x00000080L
+//DTBCLK_P_CNTL
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL__SHIFT 0x0
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN__SHIFT 0x2
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL__SHIFT 0x3
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN__SHIFT 0x5
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL__SHIFT 0x6
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN__SHIFT 0x8
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL__SHIFT 0x9
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN__SHIFT 0xb
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL_MASK 0x00000003L
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN_MASK 0x00000004L
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL_MASK 0x00000018L
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN_MASK 0x00000020L
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL_MASK 0x000000C0L
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN_MASK 0x00000100L
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL_MASK 0x00000600L
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN_MASK 0x00000800L
+//DCCG_GATE_DISABLE_CNTL5
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE__SHIFT 0x7
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE_MASK 0x00000080L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE_MASK 0x10000000L
+//DSCCLK0_DTO_PARAM
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE__SHIFT 0x0
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO__SHIFT 0x10
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK1_DTO_PARAM
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE__SHIFT 0x0
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO__SHIFT 0x10
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK2_DTO_PARAM
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE__SHIFT 0x0
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO__SHIFT 0x10
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO_MASK 0x00FF0000L
+//OTG_PIXEL_RATE_DIV
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1__SHIFT 0x0
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2__SHIFT 0x1
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1__SHIFT 0x3
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2__SHIFT 0x4
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1__SHIFT 0x6
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2__SHIFT 0x7
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1__SHIFT 0x9
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2__SHIFT 0xa
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1_MASK 0x00000001L
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2_MASK 0x00000006L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1_MASK 0x00000008L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2_MASK 0x00000030L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1_MASK 0x00000040L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2_MASK 0x00000180L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1_MASK 0x00000200L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2_MASK 0x00000C00L
+//MILLISECOND_TIME_BASE_DIV
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DISPCLK_FREQ_CHANGE_CNTL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+//DC_MEM_GLOBAL_PWR_REQ_CNTL
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+//DCCG_PERFMON_CNTL
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+//DCCG_GATE_DISABLE_CNTL
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+//DISPCLK_CGTT_BLK_CTRL_REG
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//SOCCLK_CGTT_BLK_CTRL_REG
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_CAC_STATUS
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+//MICROSECOND_TIME_BASE_DIV
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DCCG_GATE_DISABLE_CNTL2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE_MASK 0x10000000L
+//SYMCLK_CGTT_BLK_CTRL_REG
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_DISP_CNTL_REG
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+//OTG0_PIXEL_RATE_CNTL
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE__SHIFT 0x3
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS__SHIFT 0x6
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS__SHIFT 0x7
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL__SHIFT 0xc
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE_MASK 0x00000008L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS_MASK 0x00000040L
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS_MASK 0x00000080L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO0_PHASE
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO0_MODULO
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+//OTG0_PHYPLL_PIXEL_RATE_CNTL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG1_PIXEL_RATE_CNTL
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE__SHIFT 0x3
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS__SHIFT 0x6
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS__SHIFT 0x7
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL__SHIFT 0xc
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE_MASK 0x00000008L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS_MASK 0x00000040L
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS_MASK 0x00000080L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO1_PHASE
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO1_MODULO
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+//OTG1_PHYPLL_PIXEL_RATE_CNTL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG2_PIXEL_RATE_CNTL
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE__SHIFT 0x3
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS__SHIFT 0x6
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS__SHIFT 0x7
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL__SHIFT 0x8
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL__SHIFT 0x9
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL__SHIFT 0xc
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE_MASK 0x00000008L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x00000020L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS_MASK 0x00000040L
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS_MASK 0x00000080L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL_MASK 0x00000100L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL_MASK 0x00000200L
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO2_PHASE
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO2_MODULO
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xFFFFFFFFL
+//OTG2_PHYPLL_PIXEL_RATE_CNTL
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG3_PIXEL_RATE_CNTL
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE__SHIFT 0x3
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS__SHIFT 0x6
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS__SHIFT 0x7
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL__SHIFT 0x8
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL__SHIFT 0x9
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL__SHIFT 0xc
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE_MASK 0x00000008L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x00000020L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS_MASK 0x00000040L
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS_MASK 0x00000080L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL_MASK 0x00000100L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL_MASK 0x00000200L
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO3_PHASE
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO3_MODULO
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xFFFFFFFFL
+//OTG3_PHYPLL_PIXEL_RATE_CNTL
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//DPPCLK_CGTT_BLK_CTRL_REG
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DPPCLK0_DTO_PARAM
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK1_DTO_PARAM
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK2_DTO_PARAM
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK3_DTO_PARAM
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DCCG_CAC_STATUS2
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0007FFFFL
+//SYMCLKA_CLOCK_ENABLE
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL__SHIFT 0x5
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKB_CLOCK_ENABLE
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL__SHIFT 0x5
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKC_CLOCK_ENABLE
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL__SHIFT 0x5
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKD_CLOCK_ENABLE
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL__SHIFT 0x5
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKE_CLOCK_ENABLE
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL__SHIFT 0x5
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL_MASK 0x00000700L
+//DCCG_SOFT_RESET
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x00000010L
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x00000100L
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x00001000L
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x00002000L
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x00004000L
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x00008000L
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x00010000L
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x00020000L
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x00040000L
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x00080000L
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x00100000L
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x00200000L
+//DSCCLK_DTO_CTRL
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN__SHIFT 0x0
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN__SHIFT 0x1
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN__SHIFT 0x2
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN__SHIFT 0x3
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN__SHIFT 0x4
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN__SHIFT 0x5
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN__SHIFT 0x8
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN__SHIFT 0x9
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN__SHIFT 0xa
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN__SHIFT 0xb
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN__SHIFT 0xc
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN__SHIFT 0xd
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN_MASK 0x00000001L
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN_MASK 0x00000002L
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN_MASK 0x00000004L
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN_MASK 0x00000008L
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN_MASK 0x00000010L
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN_MASK 0x00000020L
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN_MASK 0x00000100L
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN_MASK 0x00000200L
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN_MASK 0x00000400L
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN_MASK 0x00000800L
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN_MASK 0x00001000L
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN_MASK 0x00002000L
+//DPPCLK_CTRL
+#define DPPCLK_CTRL__DPPCLK0_EN__SHIFT 0x0
+#define DPPCLK_CTRL__DPPCLK1_EN__SHIFT 0x3
+#define DPPCLK_CTRL__DPPCLK2_EN__SHIFT 0x6
+#define DPPCLK_CTRL__DPPCLK3_EN__SHIFT 0x9
+#define DPPCLK_CTRL__DPPCLK0_EN_MASK 0x00000001L
+#define DPPCLK_CTRL__DPPCLK1_EN_MASK 0x00000008L
+#define DPPCLK_CTRL__DPPCLK2_EN_MASK 0x00000040L
+#define DPPCLK_CTRL__DPPCLK3_EN_MASK 0x00000200L
+//DCCG_GATE_DISABLE_CNTL6
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00008000L
+//SYMCLK_PSP_CNTL
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON__SHIFT 0x0
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON_MASK 0x00000001L
+//DCCG_AUDIO_DTO_SOURCE
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO__SHIFT 0x1d
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000070L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO_MASK 0x20000000L
+//DCCG_AUDIO_DTO0_PHASE
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO0_MODULE
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_PHASE
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_MODULE
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG0_LATCH_VALUE
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG1_LATCH_VALUE
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG2_LATCH_VALUE
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG3_LATCH_VALUE
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG4_LATCH_VALUE
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG5_LATCH_VALUE
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DPPCLK_DTO_CTRL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN__SHIFT 0x11
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN__SHIFT 0x15
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN_MASK 0x00020000L
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN_MASK 0x00200000L
+//DCCG_VSYNC_CNT_CTRL
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN__SHIFT 0x12
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN__SHIFT 0x13
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN__SHIFT 0x14
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN__SHIFT 0x15
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL__SHIFT 0x1a
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL__SHIFT 0x1b
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL__SHIFT 0x1c
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL__SHIFT 0x1d
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN_MASK 0x00040000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN_MASK 0x00080000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN_MASK 0x00100000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN_MASK 0x00200000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL_MASK 0x04000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL_MASK 0x08000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL_MASK 0x10000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL_MASK 0x20000000L
+//DCCG_VSYNC_CNT_INT_CTRL
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK__SHIFT 0xa
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK__SHIFT 0xb
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK__SHIFT 0xc
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK__SHIFT 0xd
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK_MASK 0x00000400L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK_MASK 0x00000800L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK_MASK 0x00001000L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK_MASK 0x00002000L
+//FORCE_SYMCLK_DISABLE
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE__SHIFT 0x0
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE__SHIFT 0x1
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE__SHIFT 0x2
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE__SHIFT 0x3
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE__SHIFT 0x4
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE__SHIFT 0x5
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE__SHIFT 0x6
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE_MASK 0x00000001L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE_MASK 0x00000002L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE_MASK 0x00000004L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE_MASK 0x00000008L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE_MASK 0x00000010L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE_MASK 0x00000020L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE_MASK 0x00000040L
+//DTBCLK_DTO0_PHASE
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE__SHIFT 0x0
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO1_PHASE
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE__SHIFT 0x0
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO2_PHASE
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE__SHIFT 0x0
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO3_PHASE
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE__SHIFT 0x0
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO0_MODULO
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO__SHIFT 0x0
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO1_MODULO
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO__SHIFT 0x0
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO2_MODULO
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO__SHIFT 0x0
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO3_MODULO
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
+//PHYASYMCLK_CLOCK_CNTL
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN__SHIFT 0x0
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN_MASK 0x00000001L
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYBSYMCLK_CLOCK_CNTL
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN__SHIFT 0x0
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN_MASK 0x00000001L
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYCSYMCLK_CLOCK_CNTL
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN__SHIFT 0x0
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN_MASK 0x00000001L
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYDSYMCLK_CLOCK_CNTL
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN__SHIFT 0x0
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN_MASK 0x00000001L
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYESYMCLK_CLOCK_CNTL
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN__SHIFT 0x0
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN_MASK 0x00000001L
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L
+//DCCG_GATE_DISABLE_CNTL3
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE__SHIFT 0xe
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE_MASK 0x00004000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE_MASK 0x00008000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L
+//HDMISTREAMCLK0_DTO_PARAM
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L
+//DCCG_AUDIO_DTBCLK_DTO_PHASE
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTBCLK_DTO_MODULO
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO_DBUF_EN
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN__SHIFT 0x0
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN__SHIFT 0x1
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN__SHIFT 0x2
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN__SHIFT 0x3
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN_MASK 0x00000001L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN_MASK 0x00000002L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN_MASK 0x00000004L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+//DC_PERFMON0_PERFCOUNTER_CNTL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_CNTL2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_STATE
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON0_PERFMON_CNTL
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON0_PERFMON_CNTL2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON0_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON0_PERFMON_CVALUE_LOW
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON0_PERFMON_HI
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFMON_LOW
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+//DC_PERFMON1_PERFCOUNTER_CNTL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_CNTL2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_STATE
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON1_PERFMON_CNTL
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON1_PERFMON_CNTL2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON1_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON1_PERFMON_CVALUE_LOW
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON1_PERFMON_HI
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFMON_LOW
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+//DMCUB_RBBMIF_SEC_CNTL
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL__SHIFT 0x0
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL__SHIFT 0x4
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID__SHIFT 0x8
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL_MASK 0x00000007L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL_MASK 0x00000070L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID_MASK 0x01FFFF00L
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+//RBBMIF_TIMEOUT
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xFFF00000L
+//RBBMIF_STATUS
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xFFFFFFFFL
+//RBBMIF_STATUS_2
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2__SHIFT 0x0
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2_MASK 0x0000007FL
+//RBBMIF_INT_STATUS
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR_MASK 0x0003FFFCL
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS__SHIFT 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS__SHIFT 0x11
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS__SHIFT 0x12
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS__SHIFT 0x13
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS__SHIFT 0x14
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS__SHIFT 0x15
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS__SHIFT 0x16
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS__SHIFT 0x17
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS__SHIFT 0x18
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS__SHIFT 0x19
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS__SHIFT 0x1a
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS__SHIFT 0x1b
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS__SHIFT 0x1c
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS__SHIFT 0x1d
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS__SHIFT 0x1e
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x00000040L
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x00000080L
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x00000100L
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x00000200L
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x00000400L
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x00000800L
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x00001000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x00002000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x00004000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x00008000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS_MASK 0x00010000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS_MASK 0x00020000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS_MASK 0x00040000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS_MASK 0x00080000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS_MASK 0x00100000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS_MASK 0x00200000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS_MASK 0x00400000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS_MASK 0x00800000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS_MASK 0x01000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS_MASK 0x02000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS_MASK 0x04000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS_MASK 0x08000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS_MASK 0x10000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS_MASK 0x20000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS_MASK 0x40000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS_2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS_MASK 0x00000040L
+//RBBMIF_STATUS_FLAG
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x8
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x9
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x00000003L
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x00000010L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x00000020L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x00000040L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000100L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000E00L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON2_PERFCOUNTER_CNTL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_CNTL2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_STATE
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON2_PERFMON_CNTL
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON2_PERFMON_CNTL2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON2_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON2_PERFMON_CVALUE_LOW
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON2_PERFMON_HI
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFMON_LOW
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+//DC_GPU_TIMER_START_POSITION_V_UPDATE
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_VSTARTUP
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP_MASK 0x00700000L
+//DC_GPU_TIMER_READ
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xFFFFFFFFL
+//DC_GPU_TIMER_READ_CNTL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000007FL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001C000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000E0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
+//DISP_INTERRUPT_STATUS
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE3
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE6
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE7
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE14
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE15
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE19
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE20
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE21
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE22
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23_MASK 0x80000000L
+//DC_GPU_TIMER_START_POSITION_VREADY
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP_MASK 0x70000000L
+//DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP_AWAY
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY_MASK 0x70000000L
+//DISP_INTERRUPT_STATUS_CONTINUE23
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE24
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE25
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT_MASK 0x40000000L
+//DCCG_INTERRUPT_DEST
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST__SHIFT 0x0
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST__SHIFT 0x1
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST__SHIFT 0x2
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST__SHIFT 0x3
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST__SHIFT 0x4
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST__SHIFT 0x5
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST_MASK 0x00000001L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST_MASK 0x00000002L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST_MASK 0x00000004L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST_MASK 0x00000008L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST_MASK 0x00000010L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST_MASK 0x00000020L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+//DMU_INTERRUPT_DEST
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST__SHIFT 0x0
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST__SHIFT 0x1
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST__SHIFT 0x2
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST__SHIFT 0x3
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST__SHIFT 0x4
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST__SHIFT 0x5
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST__SHIFT 0x6
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST__SHIFT 0x7
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST__SHIFT 0x8
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST__SHIFT 0x9
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST__SHIFT 0xa
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST__SHIFT 0xb
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST__SHIFT 0xe
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST__SHIFT 0xf
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST__SHIFT 0x10
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST__SHIFT 0x11
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST__SHIFT 0x1a
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST_MASK 0x00000001L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST_MASK 0x00000002L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST_MASK 0x00000004L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST_MASK 0x00000008L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST_MASK 0x00000010L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST_MASK 0x00000020L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST_MASK 0x00000040L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST_MASK 0x00000080L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST_MASK 0x00000100L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST_MASK 0x00000200L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST_MASK 0x00000400L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST_MASK 0x00000800L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST_MASK 0x00002000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST_MASK 0x00004000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST_MASK 0x00008000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST_MASK 0x00010000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST_MASK 0x00020000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST_MASK 0x04000000L
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+//DMU_INTERRUPT_DEST2
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST_MASK 0x00002000L
+//DCPG_INTERRUPT_DEST
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x14
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x15
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x16
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x17
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST_MASK 0x00100000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST_MASK 0x00200000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST_MASK 0x00400000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST_MASK 0x00800000L
+//DCPG_INTERRUPT_DEST2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST__SHIFT 0x8
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST__SHIFT 0x9
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xa
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xb
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xc
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xd
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xe
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xf
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST_MASK 0x00000200L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000400L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000800L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST_MASK 0x00001000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST_MASK 0x00002000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST_MASK 0x00004000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST_MASK 0x00008000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+//MMHUBBUB_INTERRUPT_DEST
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST__SHIFT 0x1
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST__SHIFT 0x2
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST__SHIFT 0x3
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST__SHIFT 0x4
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST__SHIFT 0x5
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST__SHIFT 0x8
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST_MASK 0x00000002L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST_MASK 0x00000004L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST_MASK 0x00000008L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST_MASK 0x00000010L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST_MASK 0x00000020L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST_MASK 0x00000100L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//WB_INTERRUPT_DEST
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x1
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x9
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0xb
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000002L
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000200L
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000800L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+//DCHUB_INTERRUPT_DEST
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1e
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1f
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST_MASK 0x20000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST_MASK 0x40000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x80000000L
+//DCHUB_PERFCOUNTER_INTERRUPT_DEST
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x20000000L
+//DCHUB_INTERRUPT_DEST2
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST_MASK 0x04000000L
+//DPP_PERFCOUNTER_INTERRUPT_DEST
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+//MPC_INTERRUPT_DEST
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST__SHIFT 0x0
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST__SHIFT 0x1
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST__SHIFT 0x2
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST__SHIFT 0x3
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST__SHIFT 0x4
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST__SHIFT 0x5
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST__SHIFT 0x6
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST__SHIFT 0x7
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST_MASK 0x00000001L
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST_MASK 0x00000002L
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST_MASK 0x00000004L
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST_MASK 0x00000008L
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST_MASK 0x00000010L
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST_MASK 0x00000020L
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST_MASK 0x00000040L
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST_MASK 0x00000080L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPP_INTERRUPT_DEST
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPTC_INTERRUPT_DEST
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x18
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x19
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1a
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1b
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1c
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1d
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x01000000L
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x02000000L
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x04000000L
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x08000000L
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x10000000L
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x20000000L
+//OTG0_INTERRUPT_DEST
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG1_INTERRUPT_DEST
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG2_INTERRUPT_DEST
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG3_INTERRUPT_DEST
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG4_INTERRUPT_DEST
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG5_INTERRUPT_DEST
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//DIG_INTERRUPT_DEST
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x0
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x1
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x2
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x3
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x4
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x5
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x6
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x7
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x8
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x9
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xa
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xb
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xc
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xd
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xe
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xf
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000001L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000002L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000004L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000008L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000010L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000020L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000040L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000080L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000100L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000200L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000400L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000800L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00001000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00002000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00004000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00008000L
+//I2C_DDC_HPD_INTERRUPT_DEST
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x10
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x11
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x12
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x13
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x14
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x15
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST__SHIFT 0x16
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST_MASK 0x00010000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST_MASK 0x00020000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST_MASK 0x00040000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST_MASK 0x00080000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST_MASK 0x00100000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST_MASK 0x00200000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST_MASK 0x00400000L
+//HDCP_INTERRUPT_DEST
+//DIO_INTERRUPT_DEST
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST__SHIFT 0x4
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST_MASK 0x00000010L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//DCIO_INTERRUPT_DEST
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x0
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x2
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x3
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x4
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x6
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x10
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000001L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000004L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000008L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000010L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000040L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00010000L
+//HPD_INTERRUPT_DEST
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST__SHIFT 0x0
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST__SHIFT 0x1
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST__SHIFT 0x2
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST__SHIFT 0x3
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST__SHIFT 0x4
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST__SHIFT 0x5
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST__SHIFT 0x8
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST__SHIFT 0x9
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST__SHIFT 0xa
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST__SHIFT 0xb
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST__SHIFT 0xc
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST__SHIFT 0xd
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST_MASK 0x00000001L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST_MASK 0x00000002L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST_MASK 0x00000004L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST_MASK 0x00000008L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST_MASK 0x00000010L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST_MASK 0x00000020L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST_MASK 0x00000100L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST_MASK 0x00000200L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST_MASK 0x00000400L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST_MASK 0x00000800L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST_MASK 0x00001000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST_MASK 0x00002000L
+//AZ_INTERRUPT_DEST
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x0
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x1
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x2
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x3
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x4
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x5
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x6
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x7
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST__SHIFT 0x8
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST__SHIFT 0x9
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST__SHIFT 0xa
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST__SHIFT 0xb
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST__SHIFT 0xc
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST__SHIFT 0xd
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST__SHIFT 0xe
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST__SHIFT 0xf
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST__SHIFT 0x10
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST__SHIFT 0x11
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST__SHIFT 0x12
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST__SHIFT 0x13
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST__SHIFT 0x14
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST__SHIFT 0x15
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST__SHIFT 0x16
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST__SHIFT 0x17
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1e
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1f
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000001L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000002L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000004L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000008L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000010L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000020L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000040L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000080L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST_MASK 0x00000100L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST_MASK 0x00000200L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST_MASK 0x00000400L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST_MASK 0x00000800L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST_MASK 0x00001000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST_MASK 0x00002000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST_MASK 0x00004000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST_MASK 0x00008000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST_MASK 0x00010000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST_MASK 0x00020000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST_MASK 0x00040000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST_MASK 0x00080000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST_MASK 0x00100000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST_MASK 0x00200000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST_MASK 0x00400000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST_MASK 0x00800000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x40000000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x80000000L
+//AUX_INTERRUPT_DEST
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST__SHIFT 0x8
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST__SHIFT 0x9
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST__SHIFT 0xa
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST__SHIFT 0xb
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x10
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x12
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x13
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x14
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x16
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x17
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x18
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x19
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x1a
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x1b
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST_MASK 0x00000100L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST_MASK 0x00000200L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST_MASK 0x00000400L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST_MASK 0x00000800L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00010000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00040000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00080000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00100000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00400000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00800000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x01000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x02000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x04000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x08000000L
+//DSC_INTERRUPT_DEST
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x0
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x4
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x6
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x7
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x8
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x9
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xa
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xb
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0xc
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0xd
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x10
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x14
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000001L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000010L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000040L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000080L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000100L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000200L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000400L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000800L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00001000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00002000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00010000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00100000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+//HPO_INTERRUPT_DEST
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+//CC_DC_PIPE_DIS
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x0
+#define CC_DC_PIPE_DIS__DC_FULL_DIS__SHIFT 0xc
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE__SHIFT 0x10
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x000000FFL
+#define CC_DC_PIPE_DIS__DC_FULL_DIS_MASK 0x00001000L
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE_MASK 0x00010000L
+//DMU_CLK_CNTL
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL__SHIFT 0x0
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS__SHIFT 0x4
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS__SHIFT 0x5
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON__SHIFT 0x6
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON__SHIFT 0x7
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL__SHIFT 0x8
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS__SHIFT 0xc
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL__SHIFT 0xd
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x10
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x12
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x14
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP__SHIFT 0x16
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x18
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP__SHIFT 0x1a
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS__SHIFT 0x1c
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE__SHIFT 0x1d
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE__SHIFT 0x1f
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL_MASK 0x0000000FL
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS_MASK 0x00000010L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS_MASK 0x00000020L
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON_MASK 0x00000040L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON_MASK 0x00000080L
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL_MASK 0x00000100L
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS_MASK 0x00001000L
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL_MASK 0x00006000L
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP_MASK 0x00030000L
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP_MASK 0x000C0000L
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP_MASK 0x00300000L
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP_MASK 0x00C00000L
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP_MASK 0x03000000L
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP_MASK 0x0C000000L
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS_MASK 0x10000000L
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE_MASK 0x20000000L
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE_MASK 0x40000000L
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE_MASK 0x80000000L
+//DMCUB_SMU_INTERRUPT_CNTL
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT__SHIFT 0x0
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG__SHIFT 0x10
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT_MASK 0x00000001L
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_MASK 0xFFFF0000L
+//SMU_INTERRUPT_CONTROL
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x00000001L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x00000010L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xFFFF0000L
+//ZSC_CNTL
+#define ZSC_CNTL__FORCE_SOC_ACCESS__SHIFT 0x0
+#define ZSC_CNTL__LONO_PWR_DN__SHIFT 0x8
+#define ZSC_CNTL__FORCE_SOC_ACCESS_MASK 0x00000003L
+#define ZSC_CNTL__LONO_PWR_DN_MASK 0x00000100L
+//ZSC_CNTL2
+#define ZSC_CNTL2__ALLOW_Z10__SHIFT 0x0
+#define ZSC_CNTL2__ALLOW_Z10_MASK 0x00000001L
+//DMU_MISC_ALLOW_DS_FORCE
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN__SHIFT 0x0
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE__SHIFT 0x4
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN_MASK 0x00000001L
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE_MASK 0x00000010L
+//ZSC_STATUS
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS__SHIFT 0x0
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS__SHIFT 0x4
+#define ZSC_STATUS__FENCE_REQ_STATUS__SHIFT 0x8
+#define ZSC_STATUS__FENCE_ACK_STATUS__SHIFT 0x9
+#define ZSC_STATUS__FENCE_STATUS__SHIFT 0xa
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS_MASK 0x00000007L
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS_MASK 0x00000070L
+#define ZSC_STATUS__FENCE_REQ_STATUS_MASK 0x00000100L
+#define ZSC_STATUS__FENCE_ACK_STATUS_MASK 0x00000200L
+#define ZSC_STATUS__FENCE_STATUS_MASK 0x00000C00L
+//DMU_DISPCLK_CGTT_BLK_CTRL_REG
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DMU_SOCCLK_CGTT_BLK_CTRL_REG
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//ZPR_CLK_UNGATE_DELAY
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY__SHIFT 0x0
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+//DOMAIN0_PG_CONFIG
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN0_PG_STATUS
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN1_PG_CONFIG
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN1_PG_STATUS
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN2_PG_CONFIG
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN2_PG_STATUS
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN3_PG_CONFIG
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN3_PG_STATUS
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN16_PG_CONFIG
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN16_PG_STATUS
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN17_PG_CONFIG
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN17_PG_STATUS
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN18_PG_CONFIG
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN18_PG_STATUS
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN19_PG_CONFIG
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN19_PG_STATUS
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN22_PG_CONFIG
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN22_PG_STATUS
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN23_PG_CONFIG
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN23_PG_STATUS
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN24_PG_CONFIG
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN24_PG_STATUS
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN25_PG_CONFIG
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN25_PG_STATUS
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DCPG_INTERRUPT_STATUS
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_STATUS_2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_STATUS_3
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_CONTROL_1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DCPG_INTERRUPT_CONTROL_2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DCPG_INTERRUPT_CONTROL_3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DC_IP_REQUEST_CNTL
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x00000001L
+//LONO_MEM_PWR_REQ_CNTL
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS__SHIFT 0x0
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+//DMCUB_REGION0_OFFSET
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION0_OFFSET_HIGH
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION1_OFFSET
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION1_OFFSET_HIGH
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION2_OFFSET
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION2_OFFSET_HIGH
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION4_OFFSET
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION4_OFFSET_HIGH
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION5_OFFSET
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION5_OFFSET_HIGH
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION6_OFFSET
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION6_OFFSET_HIGH
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION7_OFFSET
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION7_OFFSET_HIGH
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION0_TOP_ADDRESS
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION1_TOP_ADDRESS
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION2_TOP_ADDRESS
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION4_TOP_ADDRESS
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION5_TOP_ADDRESS
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION6_TOP_ADDRESS
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION7_TOP_ADDRESS
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_BASE_ADDRESS
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW1_BASE_ADDRESS
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW2_BASE_ADDRESS
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW3_BASE_ADDRESS
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW4_BASE_ADDRESS
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW5_BASE_ADDRESS
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW6_BASE_ADDRESS
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW7_BASE_ADDRESS
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW0_TOP_ADDRESS
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW1_TOP_ADDRESS
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW2_TOP_ADDRESS
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW3_TOP_ADDRESS
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW4_TOP_ADDRESS
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW5_TOP_ADDRESS
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW6_TOP_ADDRESS
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW7_TOP_ADDRESS
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_OFFSET
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW0_OFFSET_HIGH
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW1_OFFSET
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW1_OFFSET_HIGH
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW2_OFFSET
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW2_OFFSET_HIGH
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW3_OFFSET
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW3_OFFSET_HIGH
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW4_OFFSET
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW4_OFFSET_HIGH
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW5_OFFSET
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW5_OFFSET_HIGH
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW6_OFFSET
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW6_OFFSET_HIGH
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW7_OFFSET
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW7_OFFSET_HIGH
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_INTERRUPT_ENABLE
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN__SHIFT 0x0
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN__SHIFT 0x1
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN__SHIFT 0x2
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN__SHIFT 0x3
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN__SHIFT 0x4
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN__SHIFT 0x5
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN__SHIFT 0x6
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN__SHIFT 0x7
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN__SHIFT 0x8
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN__SHIFT 0x9
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN__SHIFT 0xa
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN__SHIFT 0xb
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN__SHIFT 0xc
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN__SHIFT 0xd
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN__SHIFT 0xe
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN__SHIFT 0xf
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN__SHIFT 0x10
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN__SHIFT 0x11
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN__SHIFT 0x12
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN_MASK 0x00040000L
+//DMCUB_INTERRUPT_ACK
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK__SHIFT 0x0
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK__SHIFT 0x1
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK__SHIFT 0x2
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK__SHIFT 0x3
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK__SHIFT 0x4
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK__SHIFT 0x5
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK__SHIFT 0x6
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK__SHIFT 0x7
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK__SHIFT 0x8
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK__SHIFT 0x9
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK__SHIFT 0xa
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK__SHIFT 0xb
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK__SHIFT 0xc
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK__SHIFT 0xd
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK__SHIFT 0xe
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK__SHIFT 0xf
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK__SHIFT 0x10
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK__SHIFT 0x11
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK__SHIFT 0x12
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK_MASK 0x00040000L
+//DMCUB_INTERRUPT_STATUS
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT__SHIFT 0x0
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT__SHIFT 0x1
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT__SHIFT 0x2
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT__SHIFT 0x3
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT__SHIFT 0x4
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT__SHIFT 0x5
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT__SHIFT 0x6
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT__SHIFT 0x7
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT__SHIFT 0x8
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT__SHIFT 0x9
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT__SHIFT 0xa
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT__SHIFT 0xb
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT__SHIFT 0xc
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT__SHIFT 0xd
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT__SHIFT 0xe
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT__SHIFT 0xf
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT__SHIFT 0x10
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT__SHIFT 0x11
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT__SHIFT 0x12
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT__SHIFT 0x13
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT__SHIFT 0x14
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT__SHIFT 0x15
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT__SHIFT 0x16
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT_MASK 0x00000001L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT_MASK 0x00000002L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT_MASK 0x00000004L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT_MASK 0x00000008L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT_MASK 0x00000010L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT_MASK 0x00000020L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT_MASK 0x00000040L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT_MASK 0x00000080L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT_MASK 0x00000100L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT_MASK 0x00000200L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT_MASK 0x00000400L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT_MASK 0x00000800L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT_MASK 0x00001000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT_MASK 0x00002000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT_MASK 0x00004000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT_MASK 0x00008000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT_MASK 0x00010000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT_MASK 0x00020000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT_MASK 0x00040000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT_MASK 0x00080000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT_MASK 0x00100000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT_MASK 0x00200000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT_MASK 0x00400000L
+//DMCUB_INTERRUPT_TYPE
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE__SHIFT 0x0
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE__SHIFT 0x1
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE__SHIFT 0x2
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE__SHIFT 0x3
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE__SHIFT 0x4
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE__SHIFT 0x5
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE__SHIFT 0x6
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE__SHIFT 0x7
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE__SHIFT 0x8
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE__SHIFT 0x9
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE__SHIFT 0xa
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE__SHIFT 0xb
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE__SHIFT 0xc
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE__SHIFT 0xd
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE__SHIFT 0xe
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE__SHIFT 0xf
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE__SHIFT 0x10
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE__SHIFT 0x11
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE__SHIFT 0x12
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE_MASK 0x00000001L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE_MASK 0x00000002L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE_MASK 0x00000004L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE_MASK 0x00000008L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE_MASK 0x00000010L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE_MASK 0x00000020L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE_MASK 0x00000040L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE_MASK 0x00000080L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE_MASK 0x00000100L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE_MASK 0x00000200L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE_MASK 0x00000400L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE_MASK 0x00000800L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE_MASK 0x00001000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE_MASK 0x00002000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE_MASK 0x00004000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE_MASK 0x00008000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE_MASK 0x00010000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE_MASK 0x00020000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE_MASK 0x00040000L
+//DMCUB_EXT_INTERRUPT_STATUS
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID__SHIFT 0x8
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT_MASK 0x000000FFL
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID_MASK 0x0000FF00L
+//DMCUB_EXT_INTERRUPT_CTXID
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID_MASK 0x0FFFFFFFL
+//DMCUB_EXT_INTERRUPT_ACK
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK_MASK 0x00000001L
+//DMCUB_INST_FETCH_FAULT_ADDR
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_DATA_WRITE_FAULT_ADDR
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_SEC_CNTL
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL__SHIFT 0x0
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID__SHIFT 0x8
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET__SHIFT 0x10
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE__SHIFT 0x11
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS__SHIFT 0x14
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS__SHIFT 0x15
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR__SHIFT 0x18
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR__SHIFT 0x19
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL_MASK 0x00000007L
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID_MASK 0x00003F00L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_MASK 0x00010000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE_MASK 0x00020000L
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS_MASK 0x00100000L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS_MASK 0x00200000L
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR_MASK 0x01000000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR_MASK 0x02000000L
+//DMCUB_MEM_CNTL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS__SHIFT 0x0
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT 0x4
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS_MASK 0x0000000FL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK 0x000000F0L
+//DMCUB_INBOX0_BASE_ADDRESS
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_SIZE
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE__SHIFT 0x0
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_WPTR
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR__SHIFT 0x0
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_RPTR
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR__SHIFT 0x0
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_BASE_ADDRESS
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_SIZE
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE__SHIFT 0x0
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_WPTR
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR__SHIFT 0x0
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_RPTR
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR__SHIFT 0x0
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_BASE_ADDRESS
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_SIZE
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_WPTR
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_RPTR
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_BASE_ADDRESS
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_SIZE
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_WPTR
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_RPTR
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER1
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_WINDOW
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW__SHIFT 0x0
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW_MASK 0x00000007L
+//DMCUB_SCRATCH0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0__SHIFT 0x0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH1
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1__SHIFT 0x0
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH2
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2__SHIFT 0x0
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH3
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3__SHIFT 0x0
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH4
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4__SHIFT 0x0
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH5
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5__SHIFT 0x0
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH6
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6__SHIFT 0x0
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH7
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7__SHIFT 0x0
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH8
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8__SHIFT 0x0
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH9
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9__SHIFT 0x0
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH10
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10__SHIFT 0x0
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH11
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11__SHIFT 0x0
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH12
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12__SHIFT 0x0
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH13
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13__SHIFT 0x0
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH14
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14__SHIFT 0x0
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH15
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15__SHIFT 0x0
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH16
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16__SHIFT 0x0
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH17
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17__SHIFT 0x0
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH18
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18__SHIFT 0x0
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18_MASK 0xFFFFFFFFL
+//DMCUB_CNTL
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY__SHIFT 0x0
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS__SHIFT 0x8
+#define DMCUB_CNTL__DMCUB_ENABLE__SHIFT 0x10
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE__SHIFT 0x12
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN__SHIFT 0x13
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS__SHIFT 0x14
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY_MASK 0x000000FFL
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS_MASK 0x00000100L
+#define DMCUB_CNTL__DMCUB_ENABLE_MASK 0x00010000L
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE_MASK 0x00040000L
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN_MASK 0x00080000L
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS_MASK 0x00100000L
+//DMCUB_GPINT_DATAIN0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN1
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAOUT
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT__SHIFT 0x0
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT_MASK 0xFFFFFFFFL
+//DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_LS_WAKE_INT_ENABLE
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE__SHIFT 0x0
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE_MASK 0xFFFFFFFFL
+//DMCUB_MEM_PWR_CNTL
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE__SHIFT 0x1
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS__SHIFT 0x3
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE__SHIFT 0x4
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE_MASK 0x00000006L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS_MASK 0x00000008L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE_MASK 0x00000030L
+//DMCUB_TIMER_CURRENT
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT__SHIFT 0x0
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT_MASK 0xFFFFFFFFL
+//DMCUB_DBG_BUS_SELECT
+//DMCUB_PROC_ID
+#define DMCUB_PROC_ID__DMCUB_PROC_ID__SHIFT 0x0
+#define DMCUB_PROC_ID__DMCUB_PROC_ID_MASK 0x0000FFFFL
+//DMCUB_CNTL2
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET__SHIFT 0x0
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET_MASK 0x00000001L
+//DMCUB_GPINT_DATAIN2
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN3
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN4
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN5
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN6
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6_MASK 0xFFFFFFFFL
+//DMCUB_REGION3_TMR_AXI_SPACE
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE__SHIFT 0x0
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE_MASK 0x07L
+//DMCUB_SCRATCH19
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19__SHIFT 0x0
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH20
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20__SHIFT 0x0
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH21
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21__SHIFT 0x0
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH22
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22__SHIFT 0x0
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH23
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23__SHIFT 0x0
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwb_top_dispdec
+//DWB_ENABLE_CLK_CTRL
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE__SHIFT 0x0
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS__SHIFT 0x4
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS__SHIFT 0x8
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL__SHIFT 0xc
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS__SHIFT 0x18
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE_MASK 0x00000001L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS_MASK 0x00000010L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS_MASK 0x00000100L
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL_MASK 0x00003000L
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS_MASK 0x01000000L
+//DWB_MEM_PWR_CTRL
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE__SHIFT 0x8
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS__SHIFT 0xa
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE__SHIFT 0xc
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE__SHIFT 0x10
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS__SHIFT 0x12
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE__SHIFT 0x14
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE_MASK 0x00000300L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS_MASK 0x00000400L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE_MASK 0x00003000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS_MASK 0x00040000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE_MASK 0x00300000L
+//FC_MODE_CTRL
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN__SHIFT 0x0
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE__SHIFT 0x4
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN__SHIFT 0x8
+#define FC_MODE_CTRL__FC_EYE_SELECTION__SHIFT 0xc
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY__SHIFT 0x10
+#define FC_MODE_CTRL__FC_NEW_CONTENT__SHIFT 0x14
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT__SHIFT 0x1f
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_MASK 0x00000001L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE_MASK 0x00000030L
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN_MASK 0x00000100L
+#define FC_MODE_CTRL__FC_EYE_SELECTION_MASK 0x00003000L
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY_MASK 0x00010000L
+#define FC_MODE_CTRL__FC_NEW_CONTENT_MASK 0x00100000L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT_MASK 0x80000000L
+//FC_FLOW_CTRL
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT__SHIFT 0x0
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT_MASK 0x00000FFFL
+//FC_WINDOW_START
+#define FC_WINDOW_START__FC_WINDOW_START_X__SHIFT 0x0
+#define FC_WINDOW_START__FC_WINDOW_START_Y__SHIFT 0x10
+#define FC_WINDOW_START__FC_WINDOW_START_X_MASK 0x00001FFFL
+#define FC_WINDOW_START__FC_WINDOW_START_Y_MASK 0x1FFF0000L
+//FC_WINDOW_SIZE
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH__SHIFT 0x0
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT__SHIFT 0x10
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH_MASK 0x00000FFFL
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT_MASK 0x0FFF0000L
+//FC_SOURCE_SIZE
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH__SHIFT 0x0
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT__SHIFT 0x10
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH_MASK 0x00007FFFL
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT_MASK 0x7FFF0000L
+//DWB_UPDATE_CTRL
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK__SHIFT 0x0
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING__SHIFT 0x4
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK_MASK 0x00000001L
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING_MASK 0x00000010L
+//DWB_CRC_CTRL
+#define DWB_CRC_CTRL__DWB_CRC_EN__SHIFT 0x0
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN__SHIFT 0x4
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL__SHIFT 0x8
+#define DWB_CRC_CTRL__DWB_CRC_EN_MASK 0x00000001L
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN_MASK 0x00000010L
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL_MASK 0x00000300L
+//DWB_CRC_MASK_R_G
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK_MASK 0xFFFF0000L
+//DWB_CRC_MASK_B_A
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK_MASK 0xFFFF0000L
+//DWB_CRC_VAL_R_G
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED__SHIFT 0x0
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN__SHIFT 0x10
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN_MASK 0xFFFF0000L
+//DWB_CRC_VAL_B_A
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE__SHIFT 0x0
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A__SHIFT 0x10
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A_MASK 0xFFFF0000L
+//DWB_OUT_CTRL
+#define DWB_OUT_CTRL__OUT_FORMAT__SHIFT 0x0
+#define DWB_OUT_CTRL__OUT_DENORM__SHIFT 0x4
+#define DWB_OUT_CTRL__OUT_MAX__SHIFT 0x8
+#define DWB_OUT_CTRL__OUT_MIN__SHIFT 0x14
+#define DWB_OUT_CTRL__OUT_FORMAT_MASK 0x00000003L
+#define DWB_OUT_CTRL__OUT_DENORM_MASK 0x00000030L
+#define DWB_OUT_CTRL__OUT_MAX_MASK 0x0003FF00L
+#define DWB_OUT_CTRL__OUT_MIN_MASK 0x3FF00000L
+//DWB_MMHUBBUB_BACKPRESSURE_CNT_EN
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN_MASK 0x00000001L
+//DWB_MMHUBBUB_BACKPRESSURE_CNT
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE_MASK 0x0000FFFFL
+//DWB_HOST_READ_CONTROL
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//DWB_OVERFLOW_STATUS
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG__SHIFT 0x0
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK__SHIFT 0x8
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK__SHIFT 0xc
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS__SHIFT 0x10
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE__SHIFT 0x14
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG_MASK 0x00000001L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK_MASK 0x00000100L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK_MASK 0x00001000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS_MASK 0x00010000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE_MASK 0x00100000L
+//DWB_OVERFLOW_COUNTER
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE__SHIFT 0x0
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT__SHIFT 0x4
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT__SHIFT 0x10
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE_MASK 0x00000003L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT_MASK 0x0000FFF0L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT_MASK 0x0FFF0000L
+//DWB_SOFT_RESET
+#define DWB_SOFT_RESET__DWB_SOFT_RESET__SHIFT 0x0
+#define DWB_SOFT_RESET__DWB_SOFT_RESET_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwbcp_dispdec
+//DWB_HDR_MULT_COEF
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF__SHIFT 0x0
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF_MASK 0x0007FFFFL
+//DWB_GAMUT_REMAP_MODE
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE__SHIFT 0x0
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x18
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT_MASK 0x03000000L
+//DWB_GAMUT_REMAP_COEF_FORMAT
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//DWB_GAMUT_REMAPA_C11_C12
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C13_C14
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C21_C22
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C23_C24
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C31_C32
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C33_C34
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C11_C12
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C13_C14
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C21_C22
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C23_C24
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C31_C32
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C33_C34
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34_MASK 0xFFFF0000L
+//DWB_OGAM_CONTROL
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE__SHIFT 0x0
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT__SHIFT 0x4
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE__SHIFT 0x8
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT__SHIFT 0x18
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT__SHIFT 0x1c
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_MASK 0x00000003L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_MASK 0x00000010L
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE_MASK 0x00000100L
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT_MASK 0x03000000L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT_MASK 0x10000000L
+//DWB_OGAM_LUT_INDEX
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX__SHIFT 0x0
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL
+//DWB_OGAM_LUT_DATA
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L
+//DWB_OGAM_RAMA_START_CNTL_B
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_CNTL_G
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_CNTL_R
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_BASE_CNTL_B
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_B
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_BASE_CNTL_G
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_G
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_BASE_CNTL_R
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_R
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL1_B
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_B
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_END_CNTL1_G
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_G
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_END_CNTL1_R
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_R
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_OFFSET_B
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_OFFSET_G
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_OFFSET_R
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_REGION_0_1
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_2_3
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_4_5
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_6_7
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_8_9
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_10_11
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_12_13
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_14_15
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_16_17
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_18_19
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_20_21
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_22_23
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_24_25
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_26_27
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_28_29
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_30_31
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_32_33
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_START_CNTL_B
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_CNTL_G
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_CNTL_R
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_BASE_CNTL_B
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_B
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_BASE_CNTL_G
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_G
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_BASE_CNTL_R
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_R
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL1_B
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_B
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_END_CNTL1_G
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_G
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_END_CNTL1_R
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_R
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_OFFSET_B
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_OFFSET_G
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_OFFSET_R
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_REGION_0_1
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_2_3
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_4_5
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_6_7
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_8_9
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_10_11
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_12_13
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_14_15
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_16_17
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_18_19
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_20_21
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_22_23
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_24_25
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_26_27
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_28_29
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_30_31
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_32_33
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON3_PERFCOUNTER_CNTL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_CNTL2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_STATE
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON3_PERFMON_CNTL
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON3_PERFMON_CNTL2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON3_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON3_PERFMON_CVALUE_LOW
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON3_PERFMON_HI
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFMON_LOW
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+//MCIF_WB_BUFMGR_SW_CONTROL
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+//MCIF_WB_BUFMGR_STATUS
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x0
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x1
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+//MCIF_WB_BUF_PITCH
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+//MCIF_WB_BUF_1_STATUS
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_1_STATUS2
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_2_STATUS
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_2_STATUS2
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_3_STATUS
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_3_STATUS2
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_4_STATUS
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_4_STATUS2
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_ARBITRATION_CONTROL
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x14
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFF00000L
+//MCIF_WB_SCLK_CHANGE
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+//MCIF_WB_BUF_1_ADDR_Y
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_1_ADDR_C
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_2_ADDR_Y
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_2_ADDR_C
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_3_ADDR_Y
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_3_ADDR_C
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_4_ADDR_Y
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_4_ADDR_C
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUFMGR_VCE_CONTROL
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+//MCIF_WB_NB_PSTATE_CONTROL
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+//MCIF_WB_CLOCK_GATER_CONTROL
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+//MCIF_WB_SELF_REFRESH_CONTROL
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+//MULTI_LEVEL_QOS_CTRL
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+//MCIF_WB_SECURITY_LEVEL
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL__SHIFT 0x0
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE__SHIFT 0x4
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL_MASK 0x00000007L
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE_MASK 0x00000070L
+//MCIF_WB_BUF_LUMA_SIZE
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB_BUF_CHROMA_SIZE
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB_BUF_1_ADDR_Y_HIGH
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_1_ADDR_C_HIGH
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_2_ADDR_Y_HIGH
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_2_ADDR_C_HIGH
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_3_ADDR_Y_HIGH
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_3_ADDR_C_HIGH
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_4_ADDR_Y_HIGH
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_4_ADDR_C_HIGH
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_1_RESOLUTION
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_2_RESOLUTION
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_3_RESOLUTION
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_4_RESOLUTION
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_PSTATE_CHANGE_DURATION_VBI
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x0
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x10
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0x0000FFFFL
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0xFFFF0000L
+//MCIF_WB_VMID_CONTROL
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID__SHIFT 0x0
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID_MASK 0x0000000FL
+//MCIF_WB_MIN_TTO
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO__SHIFT 0x0
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO_MASK 0x0007FFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON4_PERFCOUNTER_CNTL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_CNTL2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_STATE
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON4_PERFMON_CNTL
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON4_PERFMON_CNTL2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON4_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON4_PERFMON_CVALUE_LOW
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON4_PERFMON_HI
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFMON_LOW
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+//MCIF_WB_NB_PSTATE_LATENCY_WATERMARK
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE__SHIFT 0x1f
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x07000000L
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE_MASK 0x80000000L
+//MCIF_WB_WATERMARK
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x07000000L
+//MMHUBBUB_WARMUP_CONFIG
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS__SHIFT 0x10
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID__SHIFT 0x14
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS_MASK 0x000F0000L
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID_MASK 0x00F00000L
+//MMHUBBUB_WARMUP_CONTROL_STATUS
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN__SHIFT 0x0
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN__SHIFT 0x4
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS__SHIFT 0x5
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK__SHIFT 0x6
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR__SHIFT 0x8
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN_MASK 0x00000001L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN_MASK 0x00000010L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS_MASK 0x00000020L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK_MASK 0x00000040L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR_MASK 0x03FFFF00L
+//MMHUBBUB_WARMUP_BASE_ADDR_LOW
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW_MASK 0xFFFFFFFFL
+//MMHUBBUB_WARMUP_BASE_ADDR_HIGH
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH_MASK 0x000007FFL
+//MMHUBBUB_WARMUP_ADDR_REGION
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION__SHIFT 0x0
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION_MASK 0x07FFFFFFL
+//MMHUBBUB_MIN_TTO
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO__SHIFT 0x0
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO_MASK 0x0007FFFFL
+//MMHUBBUB_CTRL
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE__SHIFT 0x0
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE_MASK 0x00000003L
+//WBIF_SMU_WM_CONTROL
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL__SHIFT 0x14
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ__SHIFT 0x16
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL_MASK 0x00300000L
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ_MASK 0x00400000L
+//WBIF0_MISC_CTRL
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH__SHIFT 0x0
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE__SHIFT 0x10
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS__SHIFT 0x18
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS__SHIFT 0x19
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH_MASK 0x000003FFL
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE_MASK 0x00010000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS_MASK 0x01000000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS_MASK 0x02000000L
+//WBIF0_PHASE0_OUTSTANDING_COUNTER
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//WBIF0_PHASE1_OUTSTANDING_COUNTER
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//MMHUBBUB_MEM_PWR_STATUS
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE_MASK 0x00000003L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE_MASK 0x00000030L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE_MASK 0x000000C0L
+//MMHUBBUB_MEM_PWR_CNTL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL__SHIFT 0x5
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM__SHIFT 0x7
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM__SHIFT 0x8
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS_MASK 0x00000010L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL_MASK 0x00000060L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM_MASK 0x00000080L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM_MASK 0x00000100L
+//MMHUBBUB_CLOCK_CNTL
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS__SHIFT 0x5
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS__SHIFT 0x9
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS__SHIFT 0xa
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS__SHIFT 0x11
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS_MASK 0x00000020L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS_MASK 0x00000200L
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS_MASK 0x00000400L
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS_MASK 0x00020000L
+//MMHUBBUB_SOFT_RESET
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET__SHIFT 0x2
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET__SHIFT 0x8
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET_MASK 0x00000004L
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET_MASK 0x00000100L
+//DMU_IF_ERR_STATUS
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR__SHIFT 0x0
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR__SHIFT 0x4
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_MASK 0x00000001L
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR_MASK 0x00000010L
+//MMHUBBUB_CLIENT_UNIT_ID
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID__SHIFT 0x8
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID_MASK 0x00003F00L
+//MMHUBBUB_WARMUP_VMID_CONTROL
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID__SHIFT 0x0
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID_MASK 0x0000000FL
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+//AZALIA_GLOBAL_CAPABILITIES
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+//AZALIA_OUTPUT_PAYLOAD_CAPABILITY
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_OUTPUT_STREAM_ARBITER_CONTROL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000FFL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0x00FF0000L
+//AZALIA_INPUT_PAYLOAD_CAPABILITY
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_CONTROLLER_DEBUG
+//AZALIA_INPUT_CRC0_CONTROL0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_CONTROL1
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CONTROL2
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC0_CONTROL3
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_RESULT
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_CONTROL1
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL2
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC1_CONTROL3
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_RESULT
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL0
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC0_CONTROL1
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL2
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC0_CONTROL3
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC0_RESULT
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL0
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC1_CONTROL1
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL2
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC1_CONTROL3
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC1_RESULT
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_MEM_PWR_CTRL
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x00000004L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x00000018L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x00000020L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x00000100L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x00000600L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x00000800L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x00003000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x00004000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x00018000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x00020000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0x000C0000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x00100000L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000L
+//AZALIA_MEM_PWR_STATUS
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0x0000000CL
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x00000030L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x00000300L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0x00000C00L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//AZALIA_F0_GTC_GROUP_OFFSET0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET1
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET2
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET3
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET4
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET5
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET6
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xFFFFFFFFL
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS__SHIFT 0x1
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x4
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL__SHIFT 0xc
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY__SHIFT 0x14
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY__SHIFT 0x18
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS_MASK 0x00000002L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000010L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL_MASK 0x0000F000L
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS_MASK 0x00010000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY_MASK 0x00F00000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY_MASK 0xFF000000L
+//AZ_MEM_GLOBAL_PWR_REQ_CNTL
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON5_PERFCOUNTER_CNTL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_CNTL2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_STATE
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON5_PERFMON_CNTL
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON5_PERFMON_CNTL2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON5_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON5_PERFMON_CVALUE_LOW
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON5_PERFMON_HI
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFMON_LOW
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+//AZF0STREAM0_AZALIA_STREAM_INDEX
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM0_AZALIA_STREAM_DATA
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+//AZF0STREAM1_AZALIA_STREAM_INDEX
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM1_AZALIA_STREAM_DATA
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+//AZF0STREAM2_AZALIA_STREAM_INDEX
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM2_AZALIA_STREAM_DATA
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+//AZF0STREAM3_AZALIA_STREAM_INDEX
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM3_AZALIA_STREAM_DATA
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+//AZF0STREAM4_AZALIA_STREAM_INDEX
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM4_AZALIA_STREAM_DATA
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+//AZF0STREAM5_AZALIA_STREAM_INDEX
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM5_AZALIA_STREAM_DATA
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+//AZF0STREAM6_AZALIA_STREAM_INDEX
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM6_AZALIA_STREAM_DATA
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+//AZF0STREAM7_AZALIA_STREAM_INDEX
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM7_AZALIA_STREAM_DATA
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+//AZF0STREAM8_AZALIA_STREAM_INDEX
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM8_AZALIA_STREAM_DATA
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+//AZF0STREAM9_AZALIA_STREAM_INDEX
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM9_AZALIA_STREAM_DATA
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+//AZF0STREAM10_AZALIA_STREAM_INDEX
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM10_AZALIA_STREAM_DATA
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+//AZF0STREAM11_AZALIA_STREAM_INDEX
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM11_AZALIA_STREAM_DATA
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+//AZF0STREAM12_AZALIA_STREAM_INDEX
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM12_AZALIA_STREAM_DATA
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+//AZF0STREAM13_AZALIA_STREAM_INDEX
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM13_AZALIA_STREAM_DATA
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+//AZF0STREAM14_AZALIA_STREAM_INDEX
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM14_AZALIA_STREAM_DATA
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+//AZF0STREAM15_AZALIA_STREAM_INDEX
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM15_AZALIA_STREAM_DATA
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0xa
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD__SHIFT 0x16
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x0007FC00L
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD_MASK 0x7FC00000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x9
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS__SHIFT 0xc
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000200L
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS_MASK 0x0000F000L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x2
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE__SHIFT 0x7
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE__SHIFT 0xc
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE__SHIFT 0xd
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP__SHIFT 0xf
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE__SHIFT 0x10
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE__SHIFT 0x11
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY__SHIFT 0x12
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS__SHIFT 0x18
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000004L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE_MASK 0x00000080L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE_MASK 0x00001000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE_MASK 0x00002000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP_MASK 0x00008000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE_MASK 0x00010000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE_MASK 0x00020000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY_MASK 0x00040000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS_MASK 0xFF000000L
+//DCHUBBUB_ARB_USR_RETRAINING_CNTL
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING__SHIFT 0x1
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE__SHIFT 0x8
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE__SHIFT 0x9
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0xa
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE__SHIFT 0xb
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST_MASK 0x00000001L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING_MASK 0x00000002L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE_MASK 0x00000100L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE_MASK 0x00000200L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000400L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE_MASK 0x00000800L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_HOSTVM_CNTL
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE__SHIFT 0x0
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE__SHIFT 0x1
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING__SHIFT 0x2
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK__SHIFT 0x3
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS__SHIFT 0x4
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS__SHIFT 0x6
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS__SHIFT 0x7
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS__SHIFT 0x8
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES__SHIFT 0x10
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS__SHIFT 0x18
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD__SHIFT 0x1c
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE_MASK 0x00000001L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE_MASK 0x00000002L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING_MASK 0x00000004L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK_MASK 0x00000008L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS_MASK 0x00000010L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS_MASK 0x00000040L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS_MASK 0x00000080L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS_MASK 0x00003F00L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES_MASK 0x00FF0000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS_MASK 0x0F000000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD_MASK 0xF0000000L
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8__SHIFT 0x10
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE__SHIFT 0x18
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8_MASK 0x00010000L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE_MASK 0x01000000L
+//DCHUBBUB_ARB_MALL_CNTL
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS__SHIFT 0x0
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE__SHIFT 0x4
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS_MASK 0x00000001L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE_MASK 0x00000010L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//VTG2_CONTROL
+#define VTG2_CONTROL__VTG2_FP2__SHIFT 0x0
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT__SHIFT 0x10
+#define VTG2_CONTROL__VTG2_ENABLE__SHIFT 0x1f
+#define VTG2_CONTROL__VTG2_FP2_MASK 0x00007FFFL
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG2_CONTROL__VTG2_ENABLE_MASK 0x80000000L
+//VTG3_CONTROL
+#define VTG3_CONTROL__VTG3_FP2__SHIFT 0x0
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT__SHIFT 0x10
+#define VTG3_CONTROL__VTG3_ENABLE__SHIFT 0x1f
+#define VTG3_CONTROL__VTG3_FP2_MASK 0x00007FFFL
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG3_CONTROL__VTG3_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS__SHIFT 0x7
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS_MASK 0x00000080L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL__SHIFT 0x2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL__SHIFT 0x3
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL__SHIFT 0x7
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY__SHIFT 0xa
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL__SHIFT 0xb
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN_MASK 0x00000002L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL_MASK 0x00000004L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL_MASK 0x00000078L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL_MASK 0x00000380L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY_MASK 0x00000400L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL_MASK 0x003FF800L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR__SHIFT 0x4
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL__SHIFT 0xc
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL__SHIFT 0x14
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET__SHIFT 0x1f
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL_MASK 0x0000000EL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR_MASK 0x00000FF0L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL_MASK 0x00007000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_MASK 0x7FF00000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+//FMON_CTRL
+#define FMON_CTRL__FMON_START__SHIFT 0x0
+#define FMON_CTRL__FMON_MODE__SHIFT 0x1
+#define FMON_CTRL__FMON_PSTATE_IGNORE__SHIFT 0x4
+#define FMON_CTRL__FMON_STATUS_IGNORE__SHIFT 0x5
+#define FMON_CTRL__FMON_URG_MODE_GREATER__SHIFT 0x6
+#define FMON_CTRL__FMON_FILTER_UID_EN__SHIFT 0x7
+#define FMON_CTRL__FMON_STATE__SHIFT 0x9
+#define FMON_CTRL__FMON_URG_FILTER__SHIFT 0xc
+#define FMON_CTRL__FMON_URG_THRESHOLD__SHIFT 0xd
+#define FMON_CTRL__FMON_FILTER_UID_1__SHIFT 0x11
+#define FMON_CTRL__FMON_FILTER_UID_2__SHIFT 0x16
+#define FMON_CTRL__FMON_SOF_SEL__SHIFT 0x1b
+#define FMON_CTRL__FMON_START_MASK 0x00000001L
+#define FMON_CTRL__FMON_MODE_MASK 0x00000006L
+#define FMON_CTRL__FMON_PSTATE_IGNORE_MASK 0x00000010L
+#define FMON_CTRL__FMON_STATUS_IGNORE_MASK 0x00000020L
+#define FMON_CTRL__FMON_URG_MODE_GREATER_MASK 0x00000040L
+#define FMON_CTRL__FMON_FILTER_UID_EN_MASK 0x00000180L
+#define FMON_CTRL__FMON_STATE_MASK 0x00000600L
+#define FMON_CTRL__FMON_URG_FILTER_MASK 0x00001000L
+#define FMON_CTRL__FMON_URG_THRESHOLD_MASK 0x0001E000L
+#define FMON_CTRL__FMON_FILTER_UID_1_MASK 0x003E0000L
+#define FMON_CTRL__FMON_FILTER_UID_2_MASK 0x07C00000L
+#define FMON_CTRL__FMON_SOF_SEL_MASK 0x38000000L
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW_MASK 0x00010000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING__SHIFT 0x9
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING_MASK 0x00000200L
+//DCHUBBUB_SDPIF_CFG2
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK_MASK 0x01FF0000L
+//VM_REQUEST_PHYSICAL
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL__SHIFT 0x0
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL__SHIFT 0x3
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL_MASK 0x00000001L
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL_MASK 0x00000008L
+//DCHUBBUB_FORCE_IO_STATUS_0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY__SHIFT 0x1
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID__SHIFT 0x3
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE__SHIFT 0x7
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO__SHIFT 0xa
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_MASK 0x00000001L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY_MASK 0x00000002L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID_MASK 0x00000078L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE_MASK 0x00000380L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO_MASK 0xFFFFFC00L
+//DCHUBBUB_FORCE_IO_STATUS_1
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI_MASK 0x001FFFFFL
+//DCN_VM_FB_LOCATION_BASE
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//DCN_VM_FB_LOCATION_TOP
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//DCN_VM_FB_OFFSET
+#define DCN_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define DCN_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BOT
+#define DCN_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define DCN_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//DCN_VM_AGP_TOP
+#define DCN_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define DCN_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BASE
+#define DCN_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define DCN_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_START
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_END
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_NOALLOC
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC__SHIFT 0x1
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC__SHIFT 0x2
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC_MASK 0x00000008L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL_MASK 0x0000F000L
+//SDPIF_REQUEST_RATE_LIMIT
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT__SHIFT 0x0
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT_MASK 0x00000FFFL
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_CRC_CTRL
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN__SHIFT 0x0
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN__SHIFT 0x1
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING__SHIFT 0x2
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING__SHIFT 0x3
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL__SHIFT 0x4
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL__SHIFT 0x6
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL__SHIFT 0x8
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL__SHIFT 0xc
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL__SHIFT 0x14
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN_MASK 0x00000001L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN_MASK 0x00000002L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING_MASK 0x00000008L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL_MASK 0x00000030L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL_MASK 0x000000C0L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL_MASK 0x00000F00L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL_MASK 0x00001000L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL_MASK 0x00100000L
+//DCHUBBUB_CRC0_VAL_R
+#define DCHUBBUB_CRC0_VAL_R__DCHUBBUB_CRC0_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_R__DCHUBBUB_CRC0_R_CR_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_G
+#define DCHUBBUB_CRC0_VAL_G__DCHUBBUB_CRC0_G_Y__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_G__DCHUBBUB_CRC0_G_Y_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_B
+#define DCHUBBUB_CRC0_VAL_B__DCHUBBUB_CRC0_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_B__DCHUBBUB_CRC0_B_CB_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_A
+#define DCHUBBUB_CRC0_VAL_A__DCHUBBUB_CRC0_ALPHA__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_A__DCHUBBUB_CRC0_ALPHA_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT_CNTL
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN__SHIFT 0x1
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE__SHIFT 0x2
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL__SHIFT 0x4
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT__SHIFT 0x10
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE_MASK 0x00000001L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN_MASK 0x00000002L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE_MASK 0x00000004L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL_MASK 0x000000F0L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT_MASK 0xFFFF0000L
+//DCHUBBUB_DCC_STAT0
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT1
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT2
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_COMPBUF_CTRL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE__SHIFT 0x0
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE__SHIFT 0x10
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS__SHIFT 0x12
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR__SHIFT 0x13
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR__SHIFT 0x1f
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE_MASK 0x00010000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS_MASK 0x00040000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR_MASK 0x00080000L
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR_MASK 0x80000000L
+//DCHUBBUB_DET0_CTRL
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET1_CTRL
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET2_CTRL
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET3_CTRL
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_MEM_PWR_MODE_CTRL
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x10
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE__SHIFT 0x12
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x14
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS__SHIFT 0x18
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS__SHIFT 0x19
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS__SHIFT 0x1a
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE_MASK 0x00030000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE_MASK 0x000C0000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS_MASK 0x01000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS_MASK 0x02000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS_MASK 0x04000000L
+//COMPBUF_MEM_PWR_CTRL_1
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY__SHIFT 0x10
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY__SHIFT 0x18
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY_MASK 0x000000FFL
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY_MASK 0x0000FF00L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY_MASK 0x00FF0000L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY_MASK 0xFF000000L
+//COMPBUF_MEM_PWR_CTRL_2
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY__SHIFT 0xc
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY_MASK 0x000000FFL
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY_MASK 0x00000F00L
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY_MASK 0x0000F000L
+//DCHUBBUB_MEM_PWR_STATUS
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE__SHIFT 0xc
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE__SHIFT 0xe
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE_MASK 0x00003000L
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE_MASK 0x0000C000L
+//COMPBUF_RESERVED_SPACE
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B__SHIFT 0x0
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS__SHIFT 0x10
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B_MASK 0x00000FFFL
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS_MASK 0x0FFF0000L
+//DCHUBBUB_DEBUG_CTRL_0
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH__SHIFT 0x0
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH__SHIFT 0x8
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH__SHIFT 0xc
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE__SHIFT 0x1b
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE__SHIFT 0x1c
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE__SHIFT 0x1d
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE__SHIFT 0x1e
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE__SHIFT 0x1f
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH_MASK 0x000000FFL
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH_MASK 0x00000F00L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH_MASK 0x0000F000L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x07FF0000L
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE_MASK 0x08000000L
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE_MASK 0x10000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE_MASK 0x20000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE_MASK 0x40000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE_MASK 0x80000000L
+//DCHUBBUB_CRC1_VAL_R
+#define DCHUBBUB_CRC1_VAL_R__DCHUBBUB_CRC1_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_R__DCHUBBUB_CRC1_R_CR_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_G
+#define DCHUBBUB_CRC1_VAL_G__DCHUBBUB_CRC1_G_Y__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_G__DCHUBBUB_CRC1_G_Y_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_B
+#define DCHUBBUB_CRC1_VAL_B__DCHUBBUB_CRC1_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_B__DCHUBBUB_CRC1_B_CB_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_A
+#define DCHUBBUB_CRC1_VAL_A__DCHUBBUB_CRC1_ALPHA__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_A__DCHUBBUB_CRC1_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_vmrq_if_dispdec
+//DCN_VM_CONTEXT0_CNTL
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_CNTL
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_CNTL
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_CNTL
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_CNTL
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_CNTL
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_CNTL
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_CNTL
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_CNTL
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_CNTL
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_CNTL
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_CNTL
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_CNTL
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_CNTL
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_CNTL
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_CNTL
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_DEFAULT_ADDR_MSB
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA__SHIFT 0x1c
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP__SHIFT 0x1d
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB_MASK 0x0000000FL
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA_MASK 0x10000000L
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP_MASK 0x20000000L
+//DCN_VM_DEFAULT_ADDR_LSB
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+//DCN_VM_FAULT_CNTL
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR__SHIFT 0x0
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE__SHIFT 0x1
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE__SHIFT 0x2
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE__SHIFT 0x8
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE__SHIFT 0x9
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR_MASK 0x00000001L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE_MASK 0x00000002L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE_MASK 0x00000004L
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE_MASK 0x00000100L
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE_MASK 0x00000200L
+//DCN_VM_FAULT_STATUS
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS__SHIFT 0x0
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT 0x10
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID__SHIFT 0x14
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT 0x18
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE__SHIFT 0x1a
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS__SHIFT 0x1f
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS_MASK 0x0000FFFFL
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK 0x000F0000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID_MASK 0x00F00000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK 0x03000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE_MASK 0x3C000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS_MASK 0x80000000L
+//DCN_VM_FAULT_ADDR_MSB
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB_MASK 0x0000000FL
+//DCN_VM_FAULT_ADDR_LSB
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbubl_dchubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON6_PERFCOUNTER_CNTL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_CNTL2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_STATE
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON6_PERFMON_CNTL
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON6_PERFMON_CNTL2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON6_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON6_PERFMON_CVALUE_LOW
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON6_PERFMON_HI
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFMON_LOW
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP0_DCHUBP_VMPG_CONFIG
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP0_DCHUBP_MALL_CONFIG
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP0_DCHUBP_MALL_SUB_VP
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP0_HUBPREQ_DEBUG
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP0_HUBP_MALL_STATUS
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_VMID_SETTINGS_0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_DMDATA_VM_CNTL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_1
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_1
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_2
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_3
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_6
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_7
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_PER_LINE_DELIVERY_PRE
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ0_PER_LINE_DELIVERY
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ0_CURSOR_SETTINGS
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ0_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ0_VBLANK_PARAMETERS_5
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_6
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_3
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_4
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_5
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_6
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ0_UCLK_PSTATE_FORCE
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ0_HUBPREQ_STATUS_REG0
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ0_HUBPREQ_STATUS_REG1
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ0_HUBPREQ_STATUS_REG2
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+//HUBPRET0_HUBPRET_CONTROL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET0_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET0_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET0_HUBPRET_READ_LINE0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE1
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_INTERRUPT
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET0_HUBPRET_READ_LINE_VALUE
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_STATUS
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+//CURSOR0_0_CURSOR_CONTROL
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_0_CURSOR_SIZE
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_0_CURSOR_POSITION
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_0_CURSOR_HOT_SPOT
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_0_CURSOR_STEREO_CONTROL
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_0_CURSOR_DST_OFFSET
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_0_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_0_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_0_DMDATA_ADDRESS_HIGH
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_0_DMDATA_ADDRESS_LOW
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_0_DMDATA_CNTL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_QOS_CNTL
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_0_DMDATA_STATUS
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_0_DMDATA_SW_CNTL
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_SW_DATA
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON7_PERFCOUNTER_CNTL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_CNTL2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_STATE
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON7_PERFMON_CNTL
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON7_PERFMON_CNTL2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON7_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON7_PERFMON_CVALUE_LOW
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON7_PERFMON_HI
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFMON_LOW
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+//HUBP1_DCSURF_SURFACE_CONFIG
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP1_DCSURF_ADDR_CONFIG
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP1_DCSURF_TILING_CONFIG
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP1_DCSURF_PRI_VIEWPORT_START
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP1_DCHUBP_CNTL
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP1_HUBP_CLK_CNTL
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP1_DCHUBP_VMPG_CONFIG
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP1_DCHUBP_MALL_CONFIG
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP1_DCHUBP_MALL_SUB_VP
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP1_HUBPREQ_DEBUG_DB
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP1_HUBPREQ_DEBUG
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP1_HUBP_MALL_STATUS
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+//HUBPREQ1_DCSURF_SURFACE_PITCH
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_VMID_SETTINGS_0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_CONTROL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL2
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ1_DCN_EXPANSION_MODE
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ1_DCN_TTU_QOS_WM
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ1_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_DMDATA_VM_CNTL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ1_BLANK_OFFSET_0
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ1_BLANK_OFFSET_1
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ1_DST_DIMENSIONS
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ1_DST_AFTER_SCALER
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ1_PREFETCH_SETTINGS
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ1_PREFETCH_SETTINGS_C
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ1_VBLANK_PARAMETERS_1
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_2
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_3
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_4
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ1_FLIP_PARAMETERS_1
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_2
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_1
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_2
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_3
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_4
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_5
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_6
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_7
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_PER_LINE_DELIVERY_PRE
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ1_PER_LINE_DELIVERY
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ1_CURSOR_SETTINGS
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ1_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ1_VBLANK_PARAMETERS_5
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_6
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_3
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_4
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_5
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_6
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ1_UCLK_PSTATE_FORCE
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ1_HUBPREQ_STATUS_REG0
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ1_HUBPREQ_STATUS_REG1
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ1_HUBPREQ_STATUS_REG2
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+//HUBPRET1_HUBPRET_CONTROL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET1_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET1_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET1_HUBPRET_READ_LINE0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE1
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_INTERRUPT
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET1_HUBPRET_READ_LINE_VALUE
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_STATUS
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+//CURSOR0_1_CURSOR_CONTROL
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_1_CURSOR_SIZE
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_1_CURSOR_POSITION
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_1_CURSOR_HOT_SPOT
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_1_CURSOR_STEREO_CONTROL
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_1_CURSOR_DST_OFFSET
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_1_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_1_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_1_DMDATA_ADDRESS_HIGH
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_1_DMDATA_ADDRESS_LOW
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_1_DMDATA_CNTL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_QOS_CNTL
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_1_DMDATA_STATUS
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_1_DMDATA_SW_CNTL
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_SW_DATA
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON8_PERFCOUNTER_CNTL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_CNTL2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_STATE
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON8_PERFMON_CNTL
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON8_PERFMON_CNTL2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON8_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON8_PERFMON_CVALUE_LOW
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON8_PERFMON_HI
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFMON_LOW
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+//HUBP2_DCSURF_SURFACE_CONFIG
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP2_DCSURF_ADDR_CONFIG
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP2_DCSURF_TILING_CONFIG
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP2_DCSURF_PRI_VIEWPORT_START
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP2_DCHUBP_CNTL
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP2_HUBP_CLK_CNTL
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP2_DCHUBP_VMPG_CONFIG
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP2_DCHUBP_MALL_CONFIG
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP2_DCHUBP_MALL_SUB_VP
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP2_HUBPREQ_DEBUG_DB
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP2_HUBPREQ_DEBUG
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP2_HUBP_MALL_STATUS
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+//HUBPREQ2_DCSURF_SURFACE_PITCH
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ2_VMID_SETTINGS_0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_CONTROL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL2
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ2_DCN_EXPANSION_MODE
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ2_DCN_TTU_QOS_WM
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ2_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_DMDATA_VM_CNTL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ2_BLANK_OFFSET_0
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ2_BLANK_OFFSET_1
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ2_DST_DIMENSIONS
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ2_DST_AFTER_SCALER
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ2_PREFETCH_SETTINGS
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ2_PREFETCH_SETTINGS_C
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ2_VBLANK_PARAMETERS_1
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_2
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_3
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_4
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ2_FLIP_PARAMETERS_1
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_2
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_1
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_2
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_3
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_4
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_5
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_6
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_7
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_PER_LINE_DELIVERY_PRE
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ2_PER_LINE_DELIVERY
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ2_CURSOR_SETTINGS
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ2_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ2_VBLANK_PARAMETERS_5
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_6
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_3
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_4
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_5
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_6
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ2_UCLK_PSTATE_FORCE
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ2_HUBPREQ_STATUS_REG0
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ2_HUBPREQ_STATUS_REG1
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ2_HUBPREQ_STATUS_REG2
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+//HUBPRET2_HUBPRET_CONTROL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET2_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET2_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET2_HUBPRET_READ_LINE0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE1
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_INTERRUPT
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET2_HUBPRET_READ_LINE_VALUE
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_STATUS
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+//CURSOR0_2_CURSOR_CONTROL
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_2_CURSOR_SIZE
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_2_CURSOR_POSITION
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_2_CURSOR_HOT_SPOT
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_2_CURSOR_STEREO_CONTROL
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_2_CURSOR_DST_OFFSET
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_2_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_2_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_2_DMDATA_ADDRESS_HIGH
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_2_DMDATA_ADDRESS_LOW
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_2_DMDATA_CNTL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON9_PERFCOUNTER_CNTL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_CNTL2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_STATE
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON9_PERFMON_CNTL
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON9_PERFMON_CNTL2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON9_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON9_PERFMON_CVALUE_LOW
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON9_PERFMON_HI
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFMON_LOW
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP3_DCHUBP_VMPG_CONFIG
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP3_DCHUBP_MALL_CONFIG
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP3_DCHUBP_MALL_SUB_VP
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBPREQ_DEBUG
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP3_HUBP_MALL_STATUS
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_VMID_SETTINGS_0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_DMDATA_VM_CNTL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_1
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_1
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_2
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_3
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ3_VBLANK_PARAMETERS_5
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_6
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_3
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_4
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_5
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_6
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ3_UCLK_PSTATE_FORCE
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ3_HUBPREQ_STATUS_REG0
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ3_HUBPREQ_STATUS_REG1
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ3_HUBPREQ_STATUS_REG2
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
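(Editorial aside, not part of the patch: the __SHIFT/_MASK pairs above are consumed by masking a register read and shifting the field down to bit 0; in the amdgpu driver this pattern is usually wrapped by the REG_GET_FIELD()/REG_SET_FIELD() helpers. A minimal open-coded sketch, using the SURFACE_FLIP_PENDING field defined earlier in this hunk and a made-up register value:

/*
 * Illustrative sketch only.  The two macros are copied from the
 * definitions above so the snippet is self-contained; the register
 * value passed to main() is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK   0x00000100L

static inline uint32_t get_flip_pending(uint32_t reg_val)
{
	/* mask off the field, then shift it down to bit 0 */
	return (reg_val & HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK)
		>> HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT;
}

int main(void)
{
	uint32_t reg_val = 0x00000100;	/* hypothetical read of DCSURF_FLIP_CONTROL */

	printf("flip pending: %u\n", get_flip_pending(reg_val));
	return 0;
}

End of aside; the patch continues below.)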
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON10_PERFCOUNTER_CNTL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_CNTL2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_STATE
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON10_PERFMON_CNTL
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON10_PERFMON_CNTL2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON10_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON10_PERFMON_CVALUE_LOW
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON10_PERFMON_HI
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFMON_LOW
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_GREEN
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_BLUE
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_ALPHA_2BIT_LUT
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG0_PRE_DEALPHA
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG0_PRE_CSC_MODE
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG0_PRE_CSC_C11_C12
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C13_C14
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C21_C22
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C23_C24
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C31_C32
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C33_C34
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C11_C12
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C13_C14
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C21_C22
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C23_C24
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C31_C32
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C33_C34
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG0_CNVC_COEF_FORMAT
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG0_PRE_DEGAM
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG0_PRE_REALPHA
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+//CNVC_CUR0_CURSOR0_CONTROL
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR0_CURSOR0_COLOR0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_COLOR1
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+//DSCL0_SCL_COEF_RAM_TAP_SELECT
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL0_SCL_COEF_RAM_TAP_DATA
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL0_SCL_MODE
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL0_SCL_TAP_CONTROL
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL0_DSCL_CONTROL
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL0_DSCL_2TAP_CONTROL
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL0_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT_C
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL0_SCL_BLACK_COLOR
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL0_DSCL_UPDATE
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL0_DSCL_AUTOCAL
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL0_OTG_H_BLANK
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_OTG_V_BLANK
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_RECOUT_START
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL0_RECOUT_SIZE
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_MPC_SIZE
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_LB_DATA_FORMAT
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL0_LB_MEMORY_CTRL
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL0_LB_V_COUNTER
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL0_DSCL_MEM_PWR_CTRL
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL0_DSCL_MEM_PWR_STATUS
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL0_OBUF_CONTROL
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL0_OBUF_MEM_PWR_CTRL
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+//CM0_CM_CONTROL
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM0_CM_POST_CSC_CONTROL
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM0_CM_POST_CSC_C11_C12
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C13_C14
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C21_C22
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C23_C24
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C31_C32
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C33_C34
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C11_C12
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C13_C14
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C21_C22
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C23_C24
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C31_C32
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C33_C34
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_CONTROL
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM0_CM_GAMUT_REMAP_C11_C12
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C13_C14
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C21_C22
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C23_C24
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C31_C32
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C33_C34
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C11_C12
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C13_C14
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C21_C22
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C23_C24
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C31_C32
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C33_C34
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM0_CM_BIAS_CR_R
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM0_CM_BIAS_Y_G_CB_B
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_CONTROL
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM0_CM_GAMCOR_LUT_INDEX
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_GAMCOR_LUT_DATA
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_LUT_CONTROL
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_OFFSET_B
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_OFFSET_G
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_OFFSET_R
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_REGION_0_1
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_2_3
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_4_5
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_6_7
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_8_9
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_10_11
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_12_13
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_14_15
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_16_17
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_18_19
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_20_21
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_22_23
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_24_25
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_26_27
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_28_29
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_30_31
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_32_33
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_OFFSET_B
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_OFFSET_G
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_OFFSET_R
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_REGION_0_1
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_2_3
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_4_5
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_6_7
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_8_9
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_10_11
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_12_13
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_14_15
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_16_17
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_18_19
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_20_21
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_22_23
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_24_25
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_26_27
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_28_29
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_30_31
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_32_33
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_R
+#define CM0_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_G
+#define CM0_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_B
+#define CM0_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_A
+#define CM0_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON11_PERFCOUNTER_CNTL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_CNTL2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_STATE
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON11_PERFMON_CNTL
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON11_PERFMON_CNTL2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON11_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON11_PERFMON_CVALUE_LOW
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON11_PERFMON_HI
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFMON_LOW
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG1_FORMAT_CONTROL
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG1_FCNV_FP_BIAS_R
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_G
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_B
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_R
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_G
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_B
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG1_COLOR_KEYER_CONTROL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG1_COLOR_KEYER_ALPHA
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_RED
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_GREEN
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_BLUE
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_ALPHA_2BIT_LUT
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG1_PRE_DEALPHA
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG1_PRE_CSC_MODE
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG1_PRE_CSC_C11_C12
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C13_C14
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C21_C22
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C23_C24
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C31_C32
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C33_C34
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C11_C12
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C13_C14
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C21_C22
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C23_C24
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C31_C32
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C33_C34
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG1_CNVC_COEF_FORMAT
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG1_PRE_DEGAM
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG1_PRE_REALPHA
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+//CNVC_CUR1_CURSOR0_CONTROL
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR1_CURSOR0_COLOR0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_COLOR1
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+//DSCL1_SCL_COEF_RAM_TAP_SELECT
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL1_SCL_COEF_RAM_TAP_DATA
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL1_SCL_MODE
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL1_SCL_TAP_CONTROL
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL1_DSCL_CONTROL
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL1_DSCL_2TAP_CONTROL
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL1_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT_C
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL1_SCL_BLACK_COLOR
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL1_DSCL_UPDATE
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL1_DSCL_AUTOCAL
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL1_OTG_H_BLANK
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_OTG_V_BLANK
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_RECOUT_START
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL1_RECOUT_SIZE
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_MPC_SIZE
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_LB_DATA_FORMAT
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL1_LB_MEMORY_CTRL
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL1_LB_V_COUNTER
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL1_DSCL_MEM_PWR_CTRL
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL1_DSCL_MEM_PWR_STATUS
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL1_OBUF_CONTROL
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL1_OBUF_MEM_PWR_CTRL
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+//CM1_CM_CONTROL
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM1_CM_POST_CSC_CONTROL
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM1_CM_POST_CSC_C11_C12
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C13_C14
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C21_C22
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C23_C24
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C31_C32
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C33_C34
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C11_C12
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C13_C14
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C21_C22
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C23_C24
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C31_C32
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C33_C34
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_CONTROL
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM1_CM_GAMUT_REMAP_C11_C12
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C13_C14
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C21_C22
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C23_C24
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C31_C32
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C33_C34
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C11_C12
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C13_C14
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C21_C22
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C23_C24
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C31_C32
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C33_C34
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM1_CM_BIAS_CR_R
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM1_CM_BIAS_Y_G_CB_B
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_CONTROL
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM1_CM_GAMCOR_LUT_INDEX
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_GAMCOR_LUT_DATA
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_LUT_CONTROL
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_OFFSET_B
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_OFFSET_G
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_OFFSET_R
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_REGION_0_1
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_2_3
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_4_5
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_6_7
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_8_9
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_10_11
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_12_13
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_14_15
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_16_17
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_18_19
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_20_21
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_22_23
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_24_25
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_26_27
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_28_29
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_30_31
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_32_33
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_OFFSET_B
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_OFFSET_G
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_OFFSET_R
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_REGION_0_1
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_2_3
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_4_5
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_6_7
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_8_9
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_10_11
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_12_13
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_14_15
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_16_17
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_18_19
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_20_21
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_22_23
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_24_25
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_26_27
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_28_29
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_30_31
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_32_33
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_TEST_DEBUG_INDEX
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM1_CM_TEST_DEBUG_DATA
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_R
+#define CM1_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_G
+#define CM1_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_B
+#define CM1_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_A
+#define CM1_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+//DPP_TOP1_DPP_CONTROL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP1_DPP_SOFT_RESET
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP1_DPP_CRC_CTRL
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP1_HOST_READ_CONTROL
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
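+/*
+ * Illustrative sketch, not part of the generated header: every register
+ * above exposes a <FIELD>__SHIFT / <FIELD>_MASK pair, the usual way a
+ * driver extracts or updates one field of a 32-bit register value.  The
+ * helpers and the usage shown below are hypothetical examples only; they
+ * assume nothing beyond standard C and the macros defined in this file.
+ */
+#include <stdint.h>
+
+/* Isolate a field with its mask, then right-align it. */
+static inline uint32_t demo_get_field(uint32_t reg_val, uint32_t mask,
+				       uint32_t shift)
+{
+	return (reg_val & mask) >> shift;
+}
+
+/* Clear a field, then place the new value at its bit position. */
+static inline uint32_t demo_set_field(uint32_t reg_val, uint32_t mask,
+				       uint32_t shift, uint32_t field_val)
+{
+	return (reg_val & ~mask) | ((field_val << shift) & mask);
+}
+
+/*
+ * Hypothetical usage: read DPP_CLOCK_ENABLE out of a DPP_TOP1_DPP_CONTROL
+ * value, then set it without disturbing the other bits.
+ *
+ *	uint32_t en = demo_get_field(val,
+ *			DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK,
+ *			DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT);
+ *	val = demo_set_field(val,
+ *			DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK,
+ *			DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT, 1);
+ */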
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON12_PERFCOUNTER_CNTL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_CNTL2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_STATE
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON12_PERFMON_CNTL
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON12_PERFMON_CNTL2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON12_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON12_PERFMON_CVALUE_LOW
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON12_PERFMON_HI
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFMON_LOW
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG2_FORMAT_CONTROL
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG2_FCNV_FP_BIAS_R
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_G
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_B
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_R
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_G
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_B
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG2_COLOR_KEYER_CONTROL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG2_COLOR_KEYER_ALPHA
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_RED
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_GREEN
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_BLUE
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_ALPHA_2BIT_LUT
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG2_PRE_DEALPHA
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG2_PRE_CSC_MODE
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG2_PRE_CSC_C11_C12
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C13_C14
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C21_C22
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C23_C24
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C31_C32
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C33_C34
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C11_C12
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C13_C14
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C21_C22
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C23_C24
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C31_C32
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C33_C34
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG2_CNVC_COEF_FORMAT
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG2_PRE_DEGAM
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG2_PRE_REALPHA
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+//CNVC_CUR2_CURSOR0_CONTROL
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR2_CURSOR0_COLOR0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_COLOR1
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+//DSCL2_SCL_COEF_RAM_TAP_SELECT
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL2_SCL_COEF_RAM_TAP_DATA
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL2_SCL_MODE
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL2_SCL_TAP_CONTROL
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL2_DSCL_CONTROL
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL2_DSCL_2TAP_CONTROL
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL2_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT_C
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL2_SCL_BLACK_COLOR
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL2_DSCL_UPDATE
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL2_DSCL_AUTOCAL
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL2_OTG_H_BLANK
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_OTG_V_BLANK
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_RECOUT_START
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL2_RECOUT_SIZE
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_MPC_SIZE
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_LB_DATA_FORMAT
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL2_LB_MEMORY_CTRL
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL2_LB_V_COUNTER
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL2_DSCL_MEM_PWR_CTRL
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL2_DSCL_MEM_PWR_STATUS
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL2_OBUF_CONTROL
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL2_OBUF_MEM_PWR_CTRL
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+//CM2_CM_CONTROL
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM2_CM_POST_CSC_CONTROL
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM2_CM_POST_CSC_C11_C12
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C13_C14
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C21_C22
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C23_C24
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C31_C32
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C33_C34
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C11_C12
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C13_C14
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C21_C22
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C23_C24
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C31_C32
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C33_C34
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_CONTROL
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM2_CM_GAMUT_REMAP_C11_C12
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C13_C14
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C21_C22
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C23_C24
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C31_C32
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C33_C34
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C11_C12
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C13_C14
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C21_C22
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C23_C24
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C31_C32
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C33_C34
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM2_CM_BIAS_CR_R
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM2_CM_BIAS_Y_G_CB_B
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_CONTROL
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM2_CM_GAMCOR_LUT_INDEX
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_GAMCOR_LUT_DATA
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_LUT_CONTROL
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_OFFSET_B
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_OFFSET_G
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_OFFSET_R
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_REGION_0_1
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_2_3
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_4_5
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_6_7
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_8_9
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_10_11
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_12_13
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_14_15
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_16_17
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_18_19
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_20_21
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_22_23
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_24_25
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_26_27
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_28_29
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_30_31
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_32_33
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_OFFSET_B
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_OFFSET_G
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_OFFSET_R
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_REGION_0_1
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_2_3
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_4_5
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_6_7
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_8_9
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_10_11
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_12_13
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_14_15
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_16_17
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_18_19
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_20_21
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_22_23
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_24_25
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_26_27
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_28_29
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_30_31
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_32_33
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_TEST_DEBUG_INDEX
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM2_CM_TEST_DEBUG_DATA
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_R
+#define CM2_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_G
+#define CM2_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_B
+#define CM2_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_A
+#define CM2_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+//DPP_TOP2_DPP_CONTROL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP2_DPP_SOFT_RESET
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP2_DPP_CRC_CTRL
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP2_HOST_READ_CONTROL
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON13_PERFCOUNTER_CNTL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_CNTL2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_STATE
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON13_PERFMON_CNTL
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON13_PERFMON_CNTL2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON13_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON13_PERFMON_CVALUE_LOW
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON13_PERFMON_HI
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFMON_LOW
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG3_FORMAT_CONTROL
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG3_FCNV_FP_BIAS_R
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_G
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_B
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_R
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_G
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_B
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG3_COLOR_KEYER_CONTROL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG3_COLOR_KEYER_ALPHA
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_RED
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_GREEN
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_BLUE
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_ALPHA_2BIT_LUT
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG3_PRE_DEALPHA
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG3_PRE_CSC_MODE
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG3_PRE_CSC_C11_C12
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C13_C14
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C21_C22
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C23_C24
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C31_C32
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C33_C34
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C11_C12
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C13_C14
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C21_C22
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C23_C24
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C31_C32
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C33_C34
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG3_CNVC_COEF_FORMAT
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG3_PRE_DEGAM
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG3_PRE_REALPHA
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+//CNVC_CUR3_CURSOR0_CONTROL
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR3_CURSOR0_COLOR0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_COLOR1
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+//DSCL3_SCL_COEF_RAM_TAP_SELECT
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL3_SCL_COEF_RAM_TAP_DATA
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL3_SCL_MODE
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL3_SCL_TAP_CONTROL
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL3_DSCL_CONTROL
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL3_DSCL_2TAP_CONTROL
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL3_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT_C
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL3_SCL_BLACK_COLOR
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL3_DSCL_UPDATE
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL3_DSCL_AUTOCAL
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL3_OTG_H_BLANK
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_OTG_V_BLANK
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_RECOUT_START
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL3_RECOUT_SIZE
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_MPC_SIZE
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_LB_DATA_FORMAT
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL3_LB_MEMORY_CTRL
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL3_LB_V_COUNTER
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL3_DSCL_MEM_PWR_CTRL
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL3_DSCL_MEM_PWR_STATUS
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL3_OBUF_CONTROL
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL3_OBUF_MEM_PWR_CTRL
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+//CM3_CM_CONTROL
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM3_CM_POST_CSC_CONTROL
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM3_CM_POST_CSC_C11_C12
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C13_C14
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C21_C22
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C23_C24
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C31_C32
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C33_C34
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C11_C12
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C13_C14
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C21_C22
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C23_C24
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C31_C32
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C33_C34
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_CONTROL
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM3_CM_GAMUT_REMAP_C11_C12
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C13_C14
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C21_C22
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C23_C24
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C31_C32
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C33_C34
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C11_C12
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C13_C14
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C21_C22
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C23_C24
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C31_C32
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C33_C34
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM3_CM_BIAS_CR_R
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM3_CM_BIAS_Y_G_CB_B
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_CONTROL
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM3_CM_GAMCOR_LUT_INDEX
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_GAMCOR_LUT_DATA
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_LUT_CONTROL
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_OFFSET_B
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_OFFSET_G
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_OFFSET_R
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_REGION_0_1
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_2_3
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_4_5
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_6_7
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_8_9
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_10_11
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_12_13
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_14_15
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_16_17
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_18_19
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_20_21
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_22_23
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_24_25
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_26_27
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_28_29
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_30_31
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_32_33
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_OFFSET_B
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_OFFSET_G
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_OFFSET_R
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_REGION_0_1
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_2_3
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_4_5
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_6_7
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_8_9
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_10_11
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_12_13
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_14_15
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_16_17
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_18_19
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_20_21
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_22_23
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_24_25
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_26_27
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_28_29
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_30_31
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_32_33
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_INDEX
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_DATA
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_R
+#define CM3_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_G
+#define CM3_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_B
+#define CM3_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_A
+#define CM3_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+//DPP_TOP3_DPP_CONTROL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP3_DPP_SOFT_RESET
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP3_DPP_CRC_CTRL
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP3_HOST_READ_CONTROL
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON14_PERFCOUNTER_CNTL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_CNTL2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_STATE
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON14_PERFMON_CNTL
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON14_PERFMON_CNTL2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON14_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON14_PERFMON_CVALUE_LOW
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON14_PERFMON_HI
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFMON_LOW
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+//MPCC0_MPCC_TOP_SEL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_BOT_SEL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_OPP_ID
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC0_MPCC_CONTROL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC0_MPCC_SM_CONTROL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC0_MPCC_UPDATE_LOCK_SEL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC0_MPCC_TOP_GAIN
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_INSIDE
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC0_MPCC_BG_R_CR
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_G_Y
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_B_CB
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC0_MPCC_MEM_PWR_CTRL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC0_MPCC_STATUS
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+//MPCC1_MPCC_TOP_SEL
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_BOT_SEL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_OPP_ID
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC1_MPCC_CONTROL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC1_MPCC_SM_CONTROL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC1_MPCC_UPDATE_LOCK_SEL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC3_MPCC_TOP_GAIN
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_INSIDE
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC3_MPCC_BG_R_CR
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_G_Y
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_B_CB
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC3_MPCC_MEM_PWR_CTRL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC3_MPCC_STATUS
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+//MPC_CLOCK_CONTROL
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL__SHIFT 0x4
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL_MASK 0x00000030L
+//MPC_SOFT_RESET
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET__SHIFT 0x16
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET__SHIFT 0x17
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET_MASK 0x00400000L
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET_MASK 0x00800000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+//MPC_CRC_CTRL
+#define MPC_CRC_CTRL__MPC_CRC_EN__SHIFT 0x0
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN__SHIFT 0x4
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE__SHIFT 0x8
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN__SHIFT 0xa
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL__SHIFT 0x18
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED__SHIFT 0x1e
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK__SHIFT 0x1f
+#define MPC_CRC_CTRL__MPC_CRC_EN_MASK 0x00000001L
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN_MASK 0x00000010L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE_MASK 0x00000300L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN_MASK 0x00000400L
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL_MASK 0x03000000L
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED_MASK 0x40000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK_MASK 0x80000000L
+//MPC_CRC_SEL_CONTROL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL__SHIFT 0x0
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL__SHIFT 0x4
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL__SHIFT 0x8
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK__SHIFT 0x10
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL_MASK 0x0000000FL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL_MASK 0x000000F0L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL_MASK 0x00000300L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK_MASK 0xFFFF0000L
+//MPC_PERFMON_EVENT_CTRL
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN__SHIFT 0x0
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN_MASK 0x00000001L
+//MPC_BYPASS_BG_AR
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+//MPC_BYPASS_BG_GB
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+//MPC_HOST_READ_CONTROL
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//MPC_DPP_PENDING_STATUS
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING__SHIFT 0x0
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING__SHIFT 0x2
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING__SHIFT 0x4
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING__SHIFT 0x5
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING__SHIFT 0x6
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING__SHIFT 0x8
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING__SHIFT 0xa
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING__SHIFT 0xc
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING__SHIFT 0xd
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING__SHIFT 0xe
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING_MASK 0x00000010L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING_MASK 0x00000020L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING_MASK 0x00000040L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING_MASK 0x00001000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING_MASK 0x00002000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING_MASK 0x00004000L
+//MPC_PENDING_STATUS_MISC
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING__SHIFT 0x0
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING__SHIFT 0x2
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING__SHIFT 0x3
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING__SHIFT 0x8
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING__SHIFT 0xa
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING__SHIFT 0xb
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING__SHIFT 0x10
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING_MASK 0x00000008L
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING_MASK 0x00000800L
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING_MASK 0x00010000L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET1
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET1
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET1
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET1
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET1
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET2
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET2
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET2
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET2
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET2
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET3
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET3
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET3
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET3
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET3
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//MPC_CRC_RESULT_A
+#define MPC_CRC_RESULT_A__MPC_CRC_RESULT_A__SHIFT 0x0
+#define MPC_CRC_RESULT_A__MPC_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_R
+#define MPC_CRC_RESULT_R__MPC_CRC_RESULT_R__SHIFT 0x0
+#define MPC_CRC_RESULT_R__MPC_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_G
+#define MPC_CRC_RESULT_G__MPC_CRC_RESULT_G__SHIFT 0x0
+#define MPC_CRC_RESULT_G__MPC_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_B
+#define MPC_CRC_RESULT_B__MPC_CRC_RESULT_B__SHIFT 0x0
+#define MPC_CRC_RESULT_B__MPC_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//MPC_DWB0_MUX
+#define MPC_DWB0_MUX__MPC_DWB0_MUX__SHIFT 0x0
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS__SHIFT 0x4
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_MASK 0x0000000FL
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS_MASK 0x000000F0L
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON15_PERFCOUNTER_CNTL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_CNTL2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_STATE
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON15_PERFMON_CNTL
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON15_PERFMON_CNTL2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON15_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON15_PERFMON_CVALUE_LOW
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON15_PERFMON_HI
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFMON_LOW
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
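Editor's aside, not part of the generated header or of this patch: the per-instance SHIFT/MASK pairs above follow the usual `(value << SHIFT) & MASK` packing convention. A minimal sketch of that convention using the OGAM1 gamut-remap defines from this block; the helper name is hypothetical and for illustration only.

```c
#include <stdint.h>

/*
 * Illustrative sketch only: pack two 16-bit gamut remap coefficients
 * (C11 and C12, set A) into one 32-bit register value using the
 * SHIFT/MASK pairs defined above. Helper name is hypothetical.
 */
static inline uint32_t mpcc_ogam1_pack_c11_c12_a(uint16_t c11, uint16_t c12)
{
	uint32_t val = 0;

	/* Shift each field into place, then mask to its bit range. */
	val |= ((uint32_t)c11 << MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT) &
	       MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK;
	val |= ((uint32_t)c12 << MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT) &
	       MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK;

	return val;
}
```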
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+//MPCC_OGAM2_MPCC_OGAM_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
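/*
 * Editorial usage note (not part of the generated header): every field above is
 * described by a __SHIFT/_MASK pair, so a field is normally read as
 * (reg & FIELD_MASK) >> FIELD__SHIFT and updated as
 * (reg & ~FIELD_MASK) | ((val << FIELD__SHIFT) & FIELD_MASK).
 * A minimal sketch using one field defined above; the mm* register offset is
 * assumed to come from the matching *_offset.h header, and RREG32()/WREG32()
 * stand in for the driver's register accessors:
 *
 *   u32 reg = RREG32(mmMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE);
 *   reg &= ~MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK;
 *   reg |= (1 << MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT) &
 *          MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK;
 *   WREG32(mmMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE, reg);
 */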
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+//MPCC_OGAM3_MPCC_OGAM_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm0_dispdec
+//MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM0_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM0_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm1_dispdec
+//MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM1_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM1_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm2_dispdec
+//MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM2_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM2_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
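These generated SHIFT/MASK pairs follow the usual convention: FIELD__SHIFT gives the bit position of a field within its 32-bit register and FIELD_MASK its in-place mask. As a minimal sketch only (not part of the patch), the helpers below show how such a pair could be applied to a register value; the function names and the value `val` are hypothetical, and the macros are assumed to come from this header.

/* Hypothetical helpers illustrating SHIFT/MASK usage; 'val' is a 32-bit
 * register value obtained elsewhere. */
static inline uint32_t get_field(uint32_t val, uint32_t mask, uint32_t shift)
{
	/* isolate the field bits, then right-align them */
	return (val & mask) >> shift;
}

static inline uint32_t set_field(uint32_t val, uint32_t mask, uint32_t shift,
				 uint32_t field)
{
	/* clear the field, then insert the new value within the mask */
	return (val & ~mask) | ((field << shift) & mask);
}

/* Example: read the shaper memory power state of MPCC_MCM2. */
uint32_t shaper_mem_pwr_state(uint32_t val)
{
	return get_field(val,
		MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK,
		MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT);
}
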
+// addressBlock: dce_dc_mpc_mpcc_mcm3_dispdec
+//MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM3_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM3_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT2_MUX
+#define MPC_OUT2_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT2_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT2_DENORM_CONTROL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT2_DENORM_CLAMP_G_Y
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT2_DENORM_CLAMP_B_CB
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT3_MUX
+#define MPC_OUT3_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT3_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT3_DENORM_CONTROL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT3_DENORM_CLAMP_G_Y
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT3_DENORM_CLAMP_B_CB
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT_CSC_COEF_FORMAT
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT__SHIFT 0x2
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT__SHIFT 0x3
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT_MASK 0x00000004L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT_MASK 0x00000008L
+//MPC_OUT0_CSC_MODE
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT0_CSC_C11_C12_A
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_A
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_A
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_A
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_A
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_A
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C11_C12_B
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_B
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_B
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_B
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_B
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_B
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_MODE
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT1_CSC_C11_C12_A
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_A
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_A
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_A
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_A
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_A
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C11_C12_B
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_B
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_B
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_B
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_B
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_B
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_MODE
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT2_CSC_C11_C12_A
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_A
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_A
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_A
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_A
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_A
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C11_C12_B
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_B
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_B
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_B
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_B
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_B
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_MODE
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT3_CSC_C11_C12_A
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_A
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_A
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_A
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_A
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_A
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C11_C12_B
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_B
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_B
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_B
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_B
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_B
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+//ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_USER_LEVEL
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_ABM_CNTL
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_BL1_PWM_GRP2_REG_LOCK
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM0_DC_ABM1_CNTL
+#define ABM0_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM0_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM0_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_THRES_12
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_THRES_34
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_CNTL_MISC
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM0_DC_ABM1_DEBUG_MISC
+//ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM0_DC_ABM1_HG_MISC_CTRL
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM0_DC_ABM1_LS_PIXEL_COUNT
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM0_DC_ABM1_HG_SAMPLE_RATE
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_SAMPLE_RATE
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_1
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_2
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_3
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_4
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_5
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_6
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_7
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_8
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_9
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_10
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_11
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_12
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_13
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_14
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_15
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_16
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_17
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_18
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_19
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_20
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_21
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_22
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_23
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_24
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_BL_MASTER_LOCK
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_abm1_dispdec
+//ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_USER_LEVEL
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_ABM_CNTL
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_BL1_PWM_GRP2_REG_LOCK
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM1_DC_ABM1_CNTL
+#define ABM1_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM1_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM1_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_THRES_12
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_THRES_34
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_CNTL_MISC
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM1_DC_ABM1_HG_MISC_CTRL
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM1_DC_ABM1_LS_PIXEL_COUNT
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM1_DC_ABM1_HG_SAMPLE_RATE
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_SAMPLE_RATE
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_1
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_2
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_3
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_4
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_5
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_6
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_7
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_8
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_9
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_10
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_11
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_12
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_13
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_14
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_15
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_16
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_17
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_18
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_19
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_20
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_21
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_22
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_23
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_24
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_BL_MASTER_LOCK
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
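Editorial note, not part of this commit: every register in the ABM blocks above is described by paired __SHIFT/_MASK macros, and a field is recovered by masking the raw 32-bit register word and shifting it down. A minimal sketch in plain C, using only the ABM1 luma-statistics macros defined above; the raw value `reg_val` is assumed to come from whatever MMIO read helper the driver uses:

/* Illustrative only -- extract the min/max luma statistics fields from a
 * raw DC_ABM1_LS_MIN_MAX_LUMA register value using the generated macros. */
#include <stdint.h>

static inline uint32_t abm1_ls_min_luma(uint32_t reg_val)
{
	return (reg_val & ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK)
		>> ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT;
}

static inline uint32_t abm1_ls_max_luma(uint32_t reg_val)
{
	return (reg_val & ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK)
		>> ABM1_DC_ABM1_LS_MAX_LUMA__SHIFT;
}

In practice the amdgpu/DC code tends to go through its field-access macros (REG_GET_FIELD/REG_SET_FIELD style) rather than open-coding the mask and shift; the helpers above only show what those macros expand to for this register.
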
+// addressBlock: dce_dc_opp_abm2_dispdec
+//ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_USER_LEVEL
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_ABM_CNTL
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_BL1_PWM_GRP2_REG_LOCK
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM2_DC_ABM1_CNTL
+#define ABM2_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM2_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM2_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_THRES_12
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_THRES_34
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_CNTL_MISC
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM2_DC_ABM1_HG_MISC_CTRL
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM2_DC_ABM1_LS_PIXEL_COUNT
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM2_DC_ABM1_HG_SAMPLE_RATE
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_SAMPLE_RATE
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_1
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_2
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_3
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_4
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_5
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_6
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_7
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_8
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_9
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_10
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_11
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_12
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_13
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_14
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_15
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_16
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_17
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_18
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_19
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_20
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_21
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_22
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_23
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_24
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_BL_MASTER_LOCK
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_abm3_dispdec
+//ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_USER_LEVEL
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_ABM_CNTL
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_BL1_PWM_GRP2_REG_LOCK
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM3_DC_ABM1_CNTL
+#define ABM3_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM3_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM3_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_THRES_12
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_THRES_34
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_CNTL_MISC
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM3_DC_ABM1_HG_MISC_CTRL
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM3_DC_ABM1_LS_PIXEL_COUNT
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM3_DC_ABM1_HG_SAMPLE_RATE
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_SAMPLE_RATE
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_1
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_2
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_3
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_4
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_5
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_6
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_7
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_8
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_9
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_10
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_11
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_12
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_13
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_14
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_15
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_16
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_17
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_18
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_19
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_20
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_21
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_22
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_23
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_24
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_BL_MASTER_LOCK
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+//DPG0_DPG_CONTROL
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG0_DPG_RAMP_CONTROL
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG0_DPG_DIMENSIONS
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_COLOUR_R_CR
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_G_Y
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_B_CB
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG0_DPG_OFFSET_SEGMENT
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_STATUS
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG0_OPP_PIPE_CRC_RESULTA
+#define DPG0_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTR
+#define DPG0_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTG
+#define DPG0_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTB
+#define DPG0_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTC
+#define DPG0_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
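Editorial note, not part of this commit: the DPG (display pattern generator) fields above are written the same way in reverse, by shifting each value into position and clipping it with the corresponding mask before OR-ing the fields together. A hedged sketch in plain C; `mode` and `bit_depth` are hypothetical parameters chosen only for illustration:

/* Illustrative only -- compose a DPG_CONTROL word that enables the test
 * pattern generator with a caller-chosen mode and bit depth. */
#include <stdint.h>

static inline uint32_t dpg0_control_word(uint32_t mode, uint32_t bit_depth)
{
	uint32_t v = 0;

	v |= (1u << DPG0_DPG_CONTROL__DPG_EN__SHIFT) & DPG0_DPG_CONTROL__DPG_EN_MASK;
	v |= (mode << DPG0_DPG_CONTROL__DPG_MODE__SHIFT) & DPG0_DPG_CONTROL__DPG_MODE_MASK;
	v |= (bit_depth << DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT) & DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK;

	return v;
}
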
+// addressBlock: dce_dc_opp_fmt0_dispdec
+//FMT0_FMT_CLAMP_COMPONENT_R
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_G
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_B
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT0_FMT_DYNAMIC_EXP_CNTL
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT0_FMT_CONTROL
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT0_FMT_BIT_DEPTH_CONTROL
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT0_FMT_DITHER_RAND_R_SEED
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_G_SEED
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_B_SEED
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_CNTL
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT0_FMT_MAP420_MEMORY_CONTROL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT0_FMT_422_CONTROL
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+//OPPBUF0_OPPBUF_CONTROL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF0_OPPBUF_CONTROL1
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+//OPP_PIPE0_OPP_PIPE_CONTROL
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
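/*
 * Editorial sketch, not part of the generated header or of this patch:
 * a generic illustration of how a __SHIFT/_MASK pair from the
 * definitions above is typically combined in a read-modify-write of a
 * single register field.  The set_field() helper and the choice of
 * field (OPP pipe 0 CRC enable) are assumptions for the example only;
 * real driver code uses its own register field accessors.
 */
#include <stdint.h>

static inline uint32_t set_field(uint32_t reg_val, uint32_t mask,
				 uint32_t shift, uint32_t field_val)
{
	/* Clear the field, then OR in the new value at its bit position. */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Enable the OPP pipe 0 CRC and let it run continuously. */
static uint32_t example_enable_opp_crc(uint32_t crc_control)
{
	crc_control = set_field(crc_control,
				OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK,
				OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT, 1);
	crc_control = set_field(crc_control,
				OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK,
				OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT, 1);
	return crc_control;
}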
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+//DPG1_DPG_CONTROL
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG1_DPG_RAMP_CONTROL
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG1_DPG_DIMENSIONS
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_COLOUR_R_CR
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_G_Y
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_B_CB
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG1_DPG_OFFSET_SEGMENT
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_STATUS
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG1_OPP_PIPE_CRC_RESULTA
+#define DPG1_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTR
+#define DPG1_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTG
+#define DPG1_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTB
+#define DPG1_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTC
+#define DPG1_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+//FMT1_FMT_CLAMP_COMPONENT_R
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_G
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_B
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT1_FMT_DYNAMIC_EXP_CNTL
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT1_FMT_CONTROL
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT1_FMT_BIT_DEPTH_CONTROL
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT1_FMT_DITHER_RAND_R_SEED
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_G_SEED
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_B_SEED
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_CNTL
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT1_FMT_MAP420_MEMORY_CONTROL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT1_FMT_422_CONTROL
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+//OPPBUF1_OPPBUF_CONTROL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF1_OPPBUF_CONTROL1
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+//OPP_PIPE1_OPP_PIPE_CONTROL
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+//DPG2_DPG_CONTROL
+#define DPG2_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG2_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG2_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG2_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG2_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG2_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG2_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG2_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG2_DPG_RAMP_CONTROL
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG2_DPG_DIMENSIONS
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_COLOUR_R_CR
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_G_Y
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_B_CB
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG2_DPG_OFFSET_SEGMENT
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_STATUS
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG2_OPP_PIPE_CRC_RESULTA
+#define DPG2_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTR
+#define DPG2_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTG
+#define DPG2_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTB
+#define DPG2_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTC
+#define DPG2_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+//FMT2_FMT_CLAMP_COMPONENT_R
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_G
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_B
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT2_FMT_DYNAMIC_EXP_CNTL
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT2_FMT_CONTROL
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT2_FMT_BIT_DEPTH_CONTROL
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT2_FMT_DITHER_RAND_R_SEED
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_G_SEED
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_B_SEED
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_CNTL
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT2_FMT_MAP420_MEMORY_CONTROL
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT2_FMT_422_CONTROL
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+//OPPBUF2_OPPBUF_CONTROL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF2_OPPBUF_CONTROL1
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+//OPP_PIPE2_OPP_PIPE_CONTROL
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+//DPG3_DPG_CONTROL
+#define DPG3_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG3_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG3_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG3_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG3_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG3_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG3_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG3_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG3_DPG_RAMP_CONTROL
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG3_DPG_DIMENSIONS
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_COLOUR_R_CR
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_G_Y
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_B_CB
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG3_DPG_OFFSET_SEGMENT
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_STATUS
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG3_OPP_PIPE_CRC_RESULTA
+#define DPG3_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTR
+#define DPG3_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTG
+#define DPG3_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTB
+#define DPG3_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTC
+#define DPG3_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+//FMT3_FMT_CLAMP_COMPONENT_R
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_G
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_B
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT3_FMT_DYNAMIC_EXP_CNTL
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT3_FMT_CONTROL
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT3_FMT_BIT_DEPTH_CONTROL
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT3_FMT_DITHER_RAND_R_SEED
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_G_SEED
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_B_SEED
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_CNTL
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT3_FMT_MAP420_MEMORY_CONTROL
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT3_FMT_422_CONTROL
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+//OPPBUF3_OPPBUF_CONTROL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF3_OPPBUF_CONTROL1
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+//OPP_PIPE3_OPP_PIPE_CONTROL
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+//DSCRM0_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+//DSCRM1_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+//DSCRM2_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+//DSCRM3_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+//OPP_TOP_CLK_CONTROL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL__SHIFT 0x8
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON__SHIFT 0xe
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON__SHIFT 0xf
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS__SHIFT 0x18
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON_MASK 0x00004000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON_MASK 0x00008000L
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS_MASK 0x01000000L
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL__SHIFT 0x0
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON16_PERFCOUNTER_CNTL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_CNTL2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_STATE
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON16_PERFMON_CNTL
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON16_PERFMON_CNTL2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON16_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON16_PERFMON_CVALUE_LOW
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON16_PERFMON_HI
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFMON_LOW
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+//ODM0_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_DATA_SOURCE_SELECT
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM0_OPTC_DATA_FORMAT_CONTROL
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM0_OPTC_BYTES_PER_PIXEL
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_MEMORY_CONFIG
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM0_OPTC_UNDERFLOW_THRESHOLD
+#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_MEMORY_CONFIG
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM1_OPTC_UNDERFLOW_THRESHOLD
+#define ODM1_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM1_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+//ODM2_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM2_OPTC_DATA_SOURCE_SELECT
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM2_OPTC_DATA_FORMAT_CONTROL
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM2_OPTC_BYTES_PER_PIXEL
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM2_OPTC_WIDTH_CONTROL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM2_OPTC_INPUT_CLOCK_CONTROL
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM2_OPTC_MEMORY_CONFIG
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM2_OPTC_INPUT_SPARE_REGISTER
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM2_OPTC_UNDERFLOW_THRESHOLD
+#define ODM2_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM2_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+//ODM3_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM3_OPTC_DATA_SOURCE_SELECT
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM3_OPTC_DATA_FORMAT_CONTROL
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM3_OPTC_BYTES_PER_PIXEL
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM3_OPTC_WIDTH_CONTROL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM3_OPTC_INPUT_CLOCK_CONTROL
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM3_OPTC_MEMORY_CONFIG
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM3_OPTC_INPUT_SPARE_REGISTER
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM3_OPTC_UNDERFLOW_THRESHOLD
+#define ODM3_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM3_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_COUNT_STOP_CONTROL
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG0_OTG_V_COUNT_STOP_CONTROL2
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG0_OTG_DLPC_CONTROL
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_LONG_VBLANK_STATUS
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_RG
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_B
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_RG
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_B
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG0_OTG_GLOBAL_CONTROL4
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_DRR_TIMING_INT_STATUS
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG0_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_V_TOTAL_CHANGE
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG0_OTG_DRR_TRIGGER_WINDOW
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_CONTOL2
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG0_OTG_M_CONST_DTO0
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG0_OTG_M_CONST_DTO1
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_PIPE_UPDATE_STATUS
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_COUNT_STOP_CONTROL
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG1_OTG_V_COUNT_STOP_CONTROL2
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG1_OTG_DLPC_CONTROL
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG1_OTG_INTERLACE_CONTROL
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG1_OTG_INTERLACE_STATUS
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG1_OTG_PIXEL_DATA_READBACK0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG1_OTG_PIXEL_DATA_READBACK1
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG1_OTG_STATUS
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG1_OTG_STATUS_POSITION
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_LONG_VBLANK_STATUS
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG1_OTG_NOM_VERT_POSITION
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG1_OTG_STATUS_FRAME_COUNT
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_STATUS_VF_COUNT
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_STATUS_HV_COUNT
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_COUNT_CONTROL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG1_OTG_COUNT_RESET
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG1_OTG_VERT_SYNC_CONTROL
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG1_OTG_STEREO_STATUS
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG1_OTG_STEREO_CONTROL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG1_OTG_SNAPSHOT_STATUS
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG1_OTG_SNAPSHOT_CONTROL
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG1_OTG_SNAPSHOT_POSITION
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_SNAPSHOT_FRAME
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_INTERRUPT_CONTROL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG1_OTG_UPDATE_LOCK
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG1_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG1_OTG_MASTER_EN
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_CRC_CNTL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_DATA_RG
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_DATA_B
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_DATA_RG
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_DATA_B
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_RG
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_B
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_RG
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_B
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_STATIC_SCREEN_CONTROL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG1_OTG_3D_STRUCTURE_CONTROL
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG1_OTG_GSL_VSYNC_GAP
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG1_OTG_MASTER_UPDATE_MODE
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG1_OTG_CLOCK_CONTROL
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG1_OTG_VSTARTUP_PARAM
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG1_OTG_VUPDATE_PARAM
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG1_OTG_VREADY_PARAM
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG1_OTG_GLOBAL_SYNC_STATUS
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG1_OTG_MASTER_UPDATE_LOCK
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG1_OTG_GSL_CONTROL
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG1_OTG_GSL_WINDOW_X
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_GSL_WINDOW_Y
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG1_OTG_VUPDATE_KEEPOUT
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL0
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL1
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL2
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL3
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG1_OTG_GLOBAL_CONTROL4
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG1_OTG_TRIG_MANUAL_CONTROL
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG1_OTG_DRR_TIMING_INT_STATUS
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG1_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_V_TOTAL_CHANGE
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG1_OTG_DRR_TRIGGER_WINDOW
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_CONTROL
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_CONTOL2
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG1_OTG_M_CONST_DTO0
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG1_OTG_M_CONST_DTO1
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG1_OTG_REQUEST_CONTROL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG1_OTG_DSC_START_POSITION
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG1_OTG_PIPE_UPDATE_STATUS
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG1_OTG_SPARE_REGISTER
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+//OTG2_OTG_H_TOTAL
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_H_BLANK_START_END
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A_CNTL
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG2_OTG_H_TIMING_CNTL
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG2_OTG_V_TOTAL
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MIN
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MAX
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MID
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_CONTROL
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_V_COUNT_STOP_CONTROL
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG2_OTG_V_COUNT_STOP_CONTROL2
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG2_OTG_V_TOTAL_INT_STATUS
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG2_OTG_VSYNC_NOM_INT_STATUS
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG2_OTG_V_BLANK_START_END
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A_CNTL
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG2_OTG_TRIGA_CNTL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGA_MANUAL_TRIG
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_TRIGB_CNTL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGB_MANUAL_TRIG
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG2_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG2_OTG_CONTROL
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG2_OTG_DLPC_CONTROL
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG2_OTG_INTERLACE_CONTROL
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG2_OTG_INTERLACE_STATUS
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG2_OTG_PIXEL_DATA_READBACK0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG2_OTG_PIXEL_DATA_READBACK1
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG2_OTG_STATUS
+#define OTG2_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG2_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG2_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG2_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG2_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG2_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG2_OTG_STATUS_POSITION
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_LONG_VBLANK_STATUS
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG2_OTG_NOM_VERT_POSITION
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG2_OTG_STATUS_FRAME_COUNT
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_STATUS_VF_COUNT
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_STATUS_HV_COUNT
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_COUNT_CONTROL
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG2_OTG_COUNT_RESET
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG2_OTG_VERT_SYNC_CONTROL
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG2_OTG_STEREO_STATUS
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG2_OTG_STEREO_CONTROL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG2_OTG_SNAPSHOT_STATUS
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG2_OTG_SNAPSHOT_CONTROL
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG2_OTG_SNAPSHOT_POSITION
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_SNAPSHOT_FRAME
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_INTERRUPT_CONTROL
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG2_OTG_UPDATE_LOCK
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG2_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG2_OTG_MASTER_EN
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG2_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG2_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_CRC_CNTL
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG2_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_DATA_RG
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC0_DATA_B
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_DATA_RG
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_DATA_B
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_RG
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_B
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_RG
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_B
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_STATIC_SCREEN_CONTROL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG2_OTG_3D_STRUCTURE_CONTROL
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG2_OTG_GSL_VSYNC_GAP
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG2_OTG_MASTER_UPDATE_MODE
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG2_OTG_CLOCK_CONTROL
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG2_OTG_VSTARTUP_PARAM
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG2_OTG_VUPDATE_PARAM
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG2_OTG_VREADY_PARAM
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG2_OTG_GLOBAL_SYNC_STATUS
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG2_OTG_MASTER_UPDATE_LOCK
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG2_OTG_GSL_CONTROL
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG2_OTG_GSL_WINDOW_X
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG2_OTG_GSL_WINDOW_Y
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG2_OTG_VUPDATE_KEEPOUT
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL0
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL1
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL2
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL3
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG2_OTG_GLOBAL_CONTROL4
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG2_OTG_TRIG_MANUAL_CONTROL
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG2_OTG_DRR_TIMING_INT_STATUS
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG2_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_V_TOTAL_CHANGE
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG2_OTG_DRR_TRIGGER_WINDOW
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_CONTROL
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_CONTOL2
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG2_OTG_M_CONST_DTO0
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG2_OTG_M_CONST_DTO1
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG2_OTG_REQUEST_CONTROL
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG2_OTG_DSC_START_POSITION
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG2_OTG_PIPE_UPDATE_STATUS
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG2_OTG_SPARE_REGISTER
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+//OTG3_OTG_H_TOTAL
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_H_BLANK_START_END
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A_CNTL
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG3_OTG_H_TIMING_CNTL
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG3_OTG_V_TOTAL
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MIN
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MAX
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MID
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_CONTROL
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_V_COUNT_STOP_CONTROL
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG3_OTG_V_COUNT_STOP_CONTROL2
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG3_OTG_V_TOTAL_INT_STATUS
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG3_OTG_VSYNC_NOM_INT_STATUS
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG3_OTG_V_BLANK_START_END
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A_CNTL
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG3_OTG_TRIGA_CNTL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGA_MANUAL_TRIG
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_TRIGB_CNTL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGB_MANUAL_TRIG
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG3_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG3_OTG_CONTROL
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG3_OTG_DLPC_CONTROL
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG3_OTG_INTERLACE_CONTROL
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG3_OTG_INTERLACE_STATUS
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG3_OTG_PIXEL_DATA_READBACK0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG3_OTG_PIXEL_DATA_READBACK1
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG3_OTG_STATUS
+#define OTG3_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG3_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG3_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG3_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG3_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG3_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG3_OTG_STATUS_POSITION
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_LONG_VBLANK_STATUS
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG3_OTG_NOM_VERT_POSITION
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG3_OTG_STATUS_FRAME_COUNT
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_STATUS_VF_COUNT
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_STATUS_HV_COUNT
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_COUNT_CONTROL
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG3_OTG_COUNT_RESET
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG3_OTG_VERT_SYNC_CONTROL
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG3_OTG_STEREO_STATUS
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG3_OTG_STEREO_CONTROL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG3_OTG_SNAPSHOT_STATUS
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG3_OTG_SNAPSHOT_CONTROL
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG3_OTG_SNAPSHOT_POSITION
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_SNAPSHOT_FRAME
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_INTERRUPT_CONTROL
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG3_OTG_UPDATE_LOCK
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG3_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG3_OTG_MASTER_EN
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG3_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG3_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_CRC_CNTL
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG3_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_DATA_RG
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC0_DATA_B
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_DATA_RG
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_DATA_B
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_RG
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_B
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_RG
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_B
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_STATIC_SCREEN_CONTROL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG3_OTG_3D_STRUCTURE_CONTROL
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG3_OTG_GSL_VSYNC_GAP
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG3_OTG_MASTER_UPDATE_MODE
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG3_OTG_CLOCK_CONTROL
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG3_OTG_VSTARTUP_PARAM
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG3_OTG_VUPDATE_PARAM
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG3_OTG_VREADY_PARAM
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG3_OTG_GLOBAL_SYNC_STATUS
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG3_OTG_MASTER_UPDATE_LOCK
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG3_OTG_GSL_CONTROL
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG3_OTG_GSL_WINDOW_X
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG3_OTG_GSL_WINDOW_Y
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG3_OTG_VUPDATE_KEEPOUT
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL0
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL1
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL2
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL3
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG3_OTG_GLOBAL_CONTROL4
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG3_OTG_TRIG_MANUAL_CONTROL
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG3_OTG_DRR_TIMING_INT_STATUS
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG3_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_V_TOTAL_CHANGE
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG3_OTG_DRR_TRIGGER_WINDOW
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_CONTROL
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_CONTOL2
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG3_OTG_M_CONST_DTO0
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG3_OTG_M_CONST_DTO1
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG3_OTG_REQUEST_CONTROL
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG3_OTG_DSC_START_POSITION
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG3_OTG_PIPE_UPDATE_STATUS
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG3_OTG_SPARE_REGISTER
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc320_dispdec
+//OTG_CRC320_OTG_CRC0_DATA_R32
+#define OTG_CRC320_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_G32
+#define OTG_CRC320_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_B32
+#define OTG_CRC320_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_C32
+#define OTG_CRC320_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_AES
+#define OTG_CRC320_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_R32
+#define OTG_CRC320_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_G32
+#define OTG_CRC320_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_B32
+#define OTG_CRC320_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_C32
+#define OTG_CRC320_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_R32
+#define OTG_CRC320_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_G32
+#define OTG_CRC320_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_B32
+#define OTG_CRC320_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_C32
+#define OTG_CRC320_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_R32
+#define OTG_CRC320_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_G32
+#define OTG_CRC320_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_B32
+#define OTG_CRC320_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_C32
+#define OTG_CRC320_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc321_dispdec
+//OTG_CRC321_OTG_CRC0_DATA_R32
+#define OTG_CRC321_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_G32
+#define OTG_CRC321_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_B32
+#define OTG_CRC321_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_C32
+#define OTG_CRC321_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_AES
+#define OTG_CRC321_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_R32
+#define OTG_CRC321_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_G32
+#define OTG_CRC321_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_B32
+#define OTG_CRC321_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_C32
+#define OTG_CRC321_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_R32
+#define OTG_CRC321_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_G32
+#define OTG_CRC321_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_B32
+#define OTG_CRC321_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_C32
+#define OTG_CRC321_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_R32
+#define OTG_CRC321_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_G32
+#define OTG_CRC321_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_B32
+#define OTG_CRC321_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_C32
+#define OTG_CRC321_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc322_dispdec
+//OTG_CRC322_OTG_CRC0_DATA_R32
+#define OTG_CRC322_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_G32
+#define OTG_CRC322_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_B32
+#define OTG_CRC322_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_C32
+#define OTG_CRC322_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_AES
+#define OTG_CRC322_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_R32
+#define OTG_CRC322_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_G32
+#define OTG_CRC322_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_B32
+#define OTG_CRC322_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_C32
+#define OTG_CRC322_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_R32
+#define OTG_CRC322_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_G32
+#define OTG_CRC322_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_B32
+#define OTG_CRC322_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_C32
+#define OTG_CRC322_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_R32
+#define OTG_CRC322_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_G32
+#define OTG_CRC322_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_B32
+#define OTG_CRC322_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_C32
+#define OTG_CRC322_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc323_dispdec
+//OTG_CRC323_OTG_CRC0_DATA_R32
+#define OTG_CRC323_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_G32
+#define OTG_CRC323_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_B32
+#define OTG_CRC323_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_C32
+#define OTG_CRC323_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_AES
+#define OTG_CRC323_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_R32
+#define OTG_CRC323_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_G32
+#define OTG_CRC323_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_B32
+#define OTG_CRC323_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_C32
+#define OTG_CRC323_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_R32
+#define OTG_CRC323_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_G32
+#define OTG_CRC323_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_B32
+#define OTG_CRC323_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_C32
+#define OTG_CRC323_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_R32
+#define OTG_CRC323_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_G32
+#define OTG_CRC323_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_B32
+#define OTG_CRC323_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_C32
+#define OTG_CRC323_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+//GSL_SOURCE_SELECT
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+//OPTC_DLPC_CONTROL
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX__SHIFT 0x0
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX_MASK 0x00000007L
+//OPTC_CLOCK_CONTROL
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL__SHIFT 0x8
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS__SHIFT 0xf
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS_MASK 0x00008000L
+//ODM_MEM_PWR_CTRL
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE__SHIFT 0x4
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS__SHIFT 0x6
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE__SHIFT 0x8
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS__SHIFT 0xa
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE__SHIFT 0xc
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS__SHIFT 0xe
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE__SHIFT 0x10
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS__SHIFT 0x12
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE__SHIFT 0x14
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS__SHIFT 0x16
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE__SHIFT 0x18
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS__SHIFT 0x1a
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE__SHIFT 0x1c
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS__SHIFT 0x1e
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS_MASK 0x00000004L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE_MASK 0x00000030L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS_MASK 0x00000040L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE_MASK 0x00000300L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS_MASK 0x00000400L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE_MASK 0x00003000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS_MASK 0x00004000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE_MASK 0x00030000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS_MASK 0x00040000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE_MASK 0x00300000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS_MASK 0x00400000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE_MASK 0x03000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS_MASK 0x04000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE_MASK 0x30000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS_MASK 0x40000000L
+//ODM_MEM_PWR_CTRL3
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE_MASK 0x0000000CL
+//ODM_MEM_PWR_STATUS
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE__SHIFT 0x0
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE__SHIFT 0x2
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE__SHIFT 0x4
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE__SHIFT 0x6
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE__SHIFT 0x8
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE__SHIFT 0xa
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE__SHIFT 0xc
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE__SHIFT 0xe
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE_MASK 0x00000003L
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE_MASK 0x0000000CL
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE_MASK 0x00000030L
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE_MASK 0x000000C0L
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE_MASK 0x00000300L
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE_MASK 0x00000C00L
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE_MASK 0x00003000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE_MASK 0x0000C000L
+//OPTC_MISC_SPARE_REGISTER
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG__SHIFT 0x0
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON17_PERFCOUNTER_CNTL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_CNTL2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_STATE
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON17_PERFMON_CNTL
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON17_PERFMON_CNTL2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON17_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON17_PERFMON_CVALUE_LOW
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON17_PERFMON_HI
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFMON_LOW
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD0_DC_HPD_INT_CONTROL
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD0_DC_HPD_CONTROL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD0_DC_HPD_FAST_TRAIN_CNTL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD0_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+//HPD1_DC_HPD_INT_STATUS
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD1_DC_HPD_INT_CONTROL
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD1_DC_HPD_CONTROL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD1_DC_HPD_FAST_TRAIN_CNTL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD1_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+//HPD2_DC_HPD_INT_STATUS
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD2_DC_HPD_INT_CONTROL
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD2_DC_HPD_CONTROL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD2_DC_HPD_FAST_TRAIN_CNTL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD2_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+//HPD3_DC_HPD_INT_STATUS
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD3_DC_HPD_INT_CONTROL
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD3_DC_HPD_CONTROL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD3_DC_HPD_FAST_TRAIN_CNTL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD3_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+//HPD4_DC_HPD_INT_STATUS
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD4_DC_HPD_INT_CONTROL
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD4_DC_HPD_CONTROL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD4_DC_HPD_FAST_TRAIN_CNTL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD4_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_DPHY_INTERNAL_CTRL
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_CONTROL0
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CONTROL1
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_RESULT0
+#define DP0_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_RESULT1
+#define DP0_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_STATUS
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_DPHY_CRC_RESULT2
+#define DP0_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_RESULT3
+#define DP0_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M_READBACK
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_TIMESTAMP
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP0_DP_SEC_PACKET_CNTL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP0_DP_MSE_RATE_CNTL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP0_DP_MSE_RATE_UPDATE
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP0_DP_MSE_SAT0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP0_DP_MSE_SAT_UPDATE
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP0_DP_MSE_LINK_TIMING
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP0_DP_MSE_MISC_CNTL
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP0_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP0_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP0_DP_CP_ENCRYPTION_CONTROL
+//DP0_DP_MSE_SAT0_STATUS
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1_STATUS
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2_STATUS
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP0_DP_DPIA_SPARE
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP0_DP_MSA_TIMING_PARAM1
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM2
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM3
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP0_DP_MSA_TIMING_PARAM4
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP0_DP_MSO_CNTL
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP0_DP_MSO_CNTL1
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP0_DP_DSC_CNTL
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP0_DP_SEC_CNTL2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP0_DP_SEC_CNTL3
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL4
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL5
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL6
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP0_DP_SEC_CNTL7
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP0_DP_DB_CNTL
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP0_DP_MSA_VBID_MISC
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_METADATA_TRANSMISSION
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP0_DP_ALPM_CNTL
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP8_CNTL
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP9_CNTL
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP10_CNTL
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP11_CNTL
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP_EN_DB_STATUS
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP0_DP_AUXLESS_ALPM_CNTL1
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP0_DP_AUXLESS_ALPM_CNTL2
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP0_DP_AUXLESS_ALPM_CNTL3
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_AUXLESS_ALPM_CNTL4
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP0_DP_AUXLESS_ALPM_CNTL5
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP0_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP0_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP0_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP0_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+//DIG0_DIG_FE_CNTL
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG0_DIG_FE_CLK_CNTL
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG0_DIG_FE_EN_CNTL
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG0_DIG_OUTPUT_CRC_CNTL
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG0_DIG_OUTPUT_CRC_RESULT
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG0_DIG_CLOCK_PATTERN
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG0_DIG_TEST_PATTERN
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG0_DIG_RANDOM_PATTERN_SEED
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG0_DIG_FIFO_CTRL0
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG0_DIG_FIFO_CTRL1
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG0_HDMI_METADATA_PACKET_CONTROL
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_CONTROL
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG0_HDMI_STATUS
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG0_HDMI_AUDIO_PACKET_CONTROL
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG0_HDMI_ACR_PACKET_CONTROL
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG0_HDMI_VBI_PACKET_CONTROL
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_DPHY_INTERNAL_CTRL
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_CONTROL0
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CONTROL1
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_RESULT0
+#define DP1_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_RESULT1
+#define DP1_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_STATUS
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_DPHY_CRC_RESULT2
+#define DP1_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_RESULT3
+#define DP1_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_CP_MSE_STATUS
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_DPIA_SPARE
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_MSO_CNTL
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP1_DP_MSO_CNTL1
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP1_DP_ALPM_CNTL
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP8_CNTL
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP9_CNTL
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP10_CNTL
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP11_CNTL
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP_EN_DB_STATUS
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP1_DP_AUXLESS_ALPM_CNTL1
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP1_DP_AUXLESS_ALPM_CNTL2
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP1_DP_AUXLESS_ALPM_CNTL3
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_AUXLESS_ALPM_CNTL4
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP1_DP_AUXLESS_ALPM_CNTL5
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP1_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP1_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP1_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP1_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+//DIG1_DIG_FE_CNTL
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG1_DIG_FE_CLK_CNTL
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG1_DIG_FE_EN_CNTL
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG1_DIG_OUTPUT_CRC_CNTL
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG1_DIG_OUTPUT_CRC_RESULT
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG1_DIG_CLOCK_PATTERN
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG1_DIG_TEST_PATTERN
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG1_DIG_RANDOM_PATTERN_SEED
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG1_DIG_FIFO_CTRL0
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG1_DIG_FIFO_CTRL1
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG1_HDMI_METADATA_PACKET_CONTROL
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_CONTROL
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG1_HDMI_STATUS
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG1_HDMI_AUDIO_PACKET_CONTROL
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG1_HDMI_ACR_PACKET_CONTROL
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG1_HDMI_VBI_PACKET_CONTROL
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG1_HDMI_INFOFRAME_CONTROL0
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG1_HDMI_INFOFRAME_CONTROL1
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG1_HDMI_GC
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG1_HDMI_DB_CONTROL
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG1_HDMI_ACR_32_0
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_32_1
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_44_0
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_44_1
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_48_0
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_48_1
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_STATUS_0
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_STATUS_1
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_DIG_BE_CLK_CNTL
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG1_DIG_DEBUG
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
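The blocks above and below follow the standard convention of these auto-generated register headers: every field of a register gets a <REG>__<FIELD>__SHIFT define giving its bit offset and a <REG>__<FIELD>_MASK define giving its bit mask. A minimal, self-contained sketch of how a driver typically reads and writes such a field is shown below; the reg_get_field()/reg_set_field() helpers are editorial illustrations only, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* SHIFT/MASK pair copied verbatim from the DIG1 block above. */
#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK   0x00000070L

/* Hypothetical helpers: extract or insert one field of a 32-bit register value. */
static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* Program a data delay of 3 on TMDS_CTL0, then read it back. */
	reg = reg_set_field(reg, DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK,
			    DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT, 0x3);
	printf("TMDS_CTL0_DATA_DELAY = %u\n",
	       reg_get_field(reg, DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK,
			     DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT));
	return 0;
}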
+// addressBlock: dce_dc_dio_dp2_dispdec
+//DP2_DP_LINK_CNTL
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP2_DP_PIXEL_FORMAT
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP2_DP_MSA_COLORIMETRY
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP2_DP_CONFIG
+#define DP2_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP2_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP2_DP_VID_STREAM_CNTL
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP2_DP_STEER_FIFO
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP2_DP_MSA_MISC
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP2_DP_DPHY_INTERNAL_CTRL
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP2_DP_VID_TIMING
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP2_DP_VID_N
+#define DP2_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP2_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP2_DP_VID_M
+#define DP2_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP2_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP2_DP_LINK_FRAMING_CNTL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP2_DP_HBR2_EYE_PATTERN
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP2_DP_VID_MSA_VBID
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP2_DP_VID_INTERRUPT_CNTL
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP2_DP_DPHY_CNTL
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP2_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP2_DP_DPHY_SYM0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM1
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM2
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP2_DP_DPHY_8B10B_CNTL
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP2_DP_DPHY_PRBS_CNTL
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP2_DP_DPHY_SCRAM_CNTL
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP2_DP_DPHY_CRC_CONTROL0
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP2_DP_DPHY_CRC_CONTROL1
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP2_DP_DPHY_CRC_RESULT0
+#define DP2_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_RESULT1
+#define DP2_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_STATUS
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP2_DP_DPHY_FAST_TRAINING
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP2_DP_DPHY_FAST_TRAINING_STATUS
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP2_DP_DPHY_CRC_RESULT2
+#define DP2_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_RESULT3
+#define DP2_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_SEC_CNTL
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP2_DP_SEC_CNTL1
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING1
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING2
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING3
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING4
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP2_DP_SEC_AUD_N
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_N_READBACK
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M_READBACK
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_TIMESTAMP
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP2_DP_SEC_PACKET_CNTL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP2_DP_MSE_RATE_CNTL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP2_DP_MSE_RATE_UPDATE
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP2_DP_MSE_SAT0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP2_DP_MSE_SAT_UPDATE
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP2_DP_MSE_LINK_TIMING
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP2_DP_MSE_MISC_CNTL
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP2_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP2_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP2_DP_MSE_SAT0_STATUS
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1_STATUS
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2_STATUS
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP2_DP_DPIA_SPARE
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP2_DP_MSA_TIMING_PARAM1
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM2
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM3
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP2_DP_MSA_TIMING_PARAM4
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP2_DP_MSO_CNTL
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP2_DP_MSO_CNTL1
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP2_DP_DSC_CNTL
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP2_DP_SEC_CNTL2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP2_DP_SEC_CNTL3
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL4
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL5
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL6
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP2_DP_SEC_CNTL7
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP2_DP_DB_CNTL
+#define DP2_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP2_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP2_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP2_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP2_DP_MSA_VBID_MISC
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_METADATA_TRANSMISSION
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP2_DP_ALPM_CNTL
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP8_CNTL
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP9_CNTL
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP10_CNTL
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP11_CNTL
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP_EN_DB_STATUS
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP2_DP_AUXLESS_ALPM_CNTL1
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP2_DP_AUXLESS_ALPM_CNTL2
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP2_DP_AUXLESS_ALPM_CNTL3
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_AUXLESS_ALPM_CNTL4
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP2_DP_AUXLESS_ALPM_CNTL5
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP2_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP2_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP2_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP2_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+//DIG2_DIG_FE_CNTL
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG2_DIG_FE_CLK_CNTL
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG2_DIG_FE_EN_CNTL
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG2_DIG_OUTPUT_CRC_CNTL
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG2_DIG_OUTPUT_CRC_RESULT
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG2_DIG_CLOCK_PATTERN
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG2_DIG_TEST_PATTERN
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG2_DIG_RANDOM_PATTERN_SEED
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG2_DIG_FIFO_CTRL0
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG2_DIG_FIFO_CTRL1
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG2_HDMI_METADATA_PACKET_CONTROL
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_CONTROL
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG2_HDMI_STATUS
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG2_HDMI_AUDIO_PACKET_CONTROL
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG2_HDMI_ACR_PACKET_CONTROL
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG2_HDMI_VBI_PACKET_CONTROL
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG2_HDMI_INFOFRAME_CONTROL0
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG2_HDMI_INFOFRAME_CONTROL1
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG2_HDMI_GC
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG2_HDMI_DB_CONTROL
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG2_HDMI_ACR_32_0
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_32_1
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_44_0
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_44_1
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_48_0
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_48_1
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_STATUS_0
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_STATUS_1
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG2_AFMT_CNTL
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG2_DIG_BE_CLK_CNTL
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG2_DIG_BE_CNTL
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG2_DIG_BE_EN_CNTL
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG2_TMDS_CNTL
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG2_TMDS_CONTROL_CHAR
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG2_TMDS_CONTROL0_FEEDBACK
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG2_TMDS_STEREOSYNC_CTL_SEL
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG2_TMDS_CTL_BITS
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG2_TMDS_DCBALANCER_CONTROL
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG2_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG2_TMDS_CTL0_1_GEN_CNTL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG2_TMDS_CTL2_3_GEN_CNTL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG2_DIG_VERSION
+#define DIG2_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG2_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp3_dispdec
+//DP3_DP_LINK_CNTL
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP3_DP_PIXEL_FORMAT
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP3_DP_MSA_COLORIMETRY
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP3_DP_CONFIG
+#define DP3_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP3_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP3_DP_VID_STREAM_CNTL
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP3_DP_STEER_FIFO
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP3_DP_MSA_MISC
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP3_DP_DPHY_INTERNAL_CTRL
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP3_DP_VID_TIMING
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP3_DP_VID_N
+#define DP3_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP3_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP3_DP_VID_M
+#define DP3_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP3_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP3_DP_LINK_FRAMING_CNTL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP3_DP_HBR2_EYE_PATTERN
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP3_DP_VID_MSA_VBID
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP3_DP_VID_INTERRUPT_CNTL
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP3_DP_DPHY_CNTL
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP3_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP3_DP_DPHY_SYM0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM1
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM2
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP3_DP_DPHY_8B10B_CNTL
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP3_DP_DPHY_PRBS_CNTL
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP3_DP_DPHY_SCRAM_CNTL
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP3_DP_DPHY_CRC_CONTROL0
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP3_DP_DPHY_CRC_CONTROL1
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP3_DP_DPHY_CRC_RESULT0
+#define DP3_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_RESULT1
+#define DP3_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_STATUS
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP3_DP_DPHY_FAST_TRAINING
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP3_DP_DPHY_FAST_TRAINING_STATUS
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP3_DP_DPHY_CRC_RESULT2
+#define DP3_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_RESULT3
+#define DP3_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_SEC_CNTL
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP3_DP_SEC_CNTL1
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING1
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING2
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING3
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING4
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP3_DP_SEC_AUD_N
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_N_READBACK
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M_READBACK
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_TIMESTAMP
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP3_DP_SEC_PACKET_CNTL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP3_DP_MSE_RATE_CNTL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP3_DP_MSE_RATE_UPDATE
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP3_DP_MSE_SAT0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP3_DP_MSE_SAT_UPDATE
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP3_DP_MSE_LINK_TIMING
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP3_DP_MSE_MISC_CNTL
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP3_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP3_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP3_DP_MSE_SAT0_STATUS
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1_STATUS
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2_STATUS
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP3_DP_DPIA_SPARE
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP3_DP_MSA_TIMING_PARAM1
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM2
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM3
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP3_DP_MSA_TIMING_PARAM4
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP3_DP_MSO_CNTL
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP3_DP_MSO_CNTL1
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP3_DP_DSC_CNTL
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP3_DP_SEC_CNTL2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP3_DP_SEC_CNTL3
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL4
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL5
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL6
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP3_DP_SEC_CNTL7
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP3_DP_DB_CNTL
+#define DP3_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP3_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP3_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP3_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP3_DP_MSA_VBID_MISC
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_METADATA_TRANSMISSION
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP3_DP_ALPM_CNTL
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP8_CNTL
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP9_CNTL
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP10_CNTL
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP11_CNTL
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP_EN_DB_STATUS
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP3_DP_AUXLESS_ALPM_CNTL1
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP3_DP_AUXLESS_ALPM_CNTL2
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP3_DP_AUXLESS_ALPM_CNTL3
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_AUXLESS_ALPM_CNTL4
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP3_DP_AUXLESS_ALPM_CNTL5
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP3_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP3_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP3_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP3_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+//DIG3_DIG_FE_CNTL
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG3_DIG_FE_CLK_CNTL
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG3_DIG_FE_EN_CNTL
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG3_DIG_OUTPUT_CRC_CNTL
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG3_DIG_OUTPUT_CRC_RESULT
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG3_DIG_CLOCK_PATTERN
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG3_DIG_TEST_PATTERN
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG3_DIG_RANDOM_PATTERN_SEED
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG3_DIG_FIFO_CTRL0
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG3_DIG_FIFO_CTRL1
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG3_HDMI_METADATA_PACKET_CONTROL
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_CONTROL
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG3_HDMI_STATUS
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG3_HDMI_AUDIO_PACKET_CONTROL
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG3_HDMI_ACR_PACKET_CONTROL
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG3_HDMI_VBI_PACKET_CONTROL
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG3_HDMI_INFOFRAME_CONTROL0
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG3_HDMI_INFOFRAME_CONTROL1
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG3_HDMI_GC
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG3_HDMI_DB_CONTROL
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG3_HDMI_ACR_32_0
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_32_1
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_44_0
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_44_1
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_48_0
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_48_1
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_STATUS_0
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_STATUS_1
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG3_AFMT_CNTL
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG3_DIG_BE_CLK_CNTL
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG3_DIG_BE_CNTL
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG3_DIG_BE_EN_CNTL
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG3_TMDS_CNTL
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG3_TMDS_CONTROL_CHAR
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG3_TMDS_CONTROL0_FEEDBACK
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG3_TMDS_STEREOSYNC_CTL_SEL
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG3_TMDS_CTL_BITS
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG3_TMDS_DCBALANCER_CONTROL
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG3_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG3_TMDS_CTL0_1_GEN_CNTL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG3_TMDS_CTL2_3_GEN_CNTL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG3_DIG_VERSION
+#define DIG3_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG3_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+//DP4_DP_LINK_CNTL
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP4_DP_PIXEL_FORMAT
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP4_DP_MSA_COLORIMETRY
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP4_DP_CONFIG
+#define DP4_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP4_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP4_DP_VID_STREAM_CNTL
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP4_DP_STEER_FIFO
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP4_DP_MSA_MISC
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP4_DP_DPHY_INTERNAL_CTRL
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP4_DP_VID_TIMING
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP4_DP_VID_N
+#define DP4_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP4_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP4_DP_VID_M
+#define DP4_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP4_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP4_DP_LINK_FRAMING_CNTL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP4_DP_HBR2_EYE_PATTERN
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP4_DP_VID_MSA_VBID
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP4_DP_VID_INTERRUPT_CNTL
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP4_DP_DPHY_CNTL
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP4_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP4_DP_DPHY_SYM0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM1
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM2
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP4_DP_DPHY_8B10B_CNTL
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP4_DP_DPHY_PRBS_CNTL
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP4_DP_DPHY_SCRAM_CNTL
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP4_DP_DPHY_CRC_CONTROL0
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP4_DP_DPHY_CRC_CONTROL1
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP4_DP_DPHY_CRC_RESULT0
+#define DP4_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_RESULT1
+#define DP4_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_STATUS
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP4_DP_DPHY_FAST_TRAINING
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP4_DP_DPHY_FAST_TRAINING_STATUS
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP4_DP_DPHY_CRC_RESULT2
+#define DP4_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_RESULT3
+#define DP4_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_SEC_CNTL
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP4_DP_SEC_CNTL1
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING1
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING2
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING3
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING4
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP4_DP_SEC_AUD_N
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_N_READBACK
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M_READBACK
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_TIMESTAMP
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP4_DP_SEC_PACKET_CNTL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP4_DP_MSE_RATE_CNTL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP4_DP_MSE_RATE_UPDATE
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP4_DP_MSE_SAT0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP4_DP_MSE_SAT_UPDATE
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP4_DP_MSE_LINK_TIMING
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP4_DP_MSE_MISC_CNTL
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP4_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP4_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP4_DP_MSE_SAT0_STATUS
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1_STATUS
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2_STATUS
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP4_DP_DPIA_SPARE
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP4_DP_MSA_TIMING_PARAM1
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM2
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM3
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP4_DP_MSA_TIMING_PARAM4
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP4_DP_MSO_CNTL
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP4_DP_MSO_CNTL1
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP4_DP_DSC_CNTL
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP4_DP_SEC_CNTL2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP4_DP_SEC_CNTL3
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL4
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL5
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL6
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP4_DP_SEC_CNTL7
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP4_DP_DB_CNTL
+#define DP4_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP4_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP4_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP4_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP4_DP_MSA_VBID_MISC
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_METADATA_TRANSMISSION
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP4_DP_ALPM_CNTL
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP8_CNTL
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP9_CNTL
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP10_CNTL
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP11_CNTL
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP_EN_DB_STATUS
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP4_DP_AUXLESS_ALPM_CNTL1
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP4_DP_AUXLESS_ALPM_CNTL2
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP4_DP_AUXLESS_ALPM_CNTL3
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_AUXLESS_ALPM_CNTL4
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP4_DP_AUXLESS_ALPM_CNTL5
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP4_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP4_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP4_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP4_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig4_dispdec
+//DIG4_DIG_FE_CNTL
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG4_DIG_FE_CLK_CNTL
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG4_DIG_FE_EN_CNTL
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG4_DIG_OUTPUT_CRC_CNTL
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG4_DIG_OUTPUT_CRC_RESULT
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG4_DIG_CLOCK_PATTERN
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG4_DIG_TEST_PATTERN
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG4_DIG_RANDOM_PATTERN_SEED
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG4_DIG_FIFO_CTRL0
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG4_DIG_FIFO_CTRL1
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG4_HDMI_METADATA_PACKET_CONTROL
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_CONTROL
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG4_HDMI_STATUS
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG4_HDMI_AUDIO_PACKET_CONTROL
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG4_HDMI_ACR_PACKET_CONTROL
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG4_HDMI_VBI_PACKET_CONTROL
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG4_HDMI_INFOFRAME_CONTROL0
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG4_HDMI_INFOFRAME_CONTROL1
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG4_HDMI_GC
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG4_HDMI_DB_CONTROL
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG4_HDMI_ACR_32_0
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_32_1
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_44_0
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_44_1
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_48_0
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_48_1
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_STATUS_0
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_STATUS_1
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG4_AFMT_CNTL
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG4_DIG_BE_CLK_CNTL
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG4_DIG_BE_CNTL
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG4_DIG_BE_EN_CNTL
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG4_TMDS_CNTL
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG4_TMDS_CONTROL_CHAR
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG4_TMDS_CONTROL0_FEEDBACK
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG4_TMDS_STEREOSYNC_CTL_SEL
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG4_TMDS_CTL_BITS
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG4_TMDS_DCBALANCER_CONTROL
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG4_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG4_TMDS_CTL0_1_GEN_CNTL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG4_TMDS_CTL2_3_GEN_CNTL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG4_DIG_DEBUG
+//DIG4_DIG_VERSION
+#define DIG4_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG4_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dig0_afmt_afmt_dispdec
+//AFMT0_AFMT_ACP
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT0_AFMT_VBI_PACKET_CONTROL
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT0_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT0_AFMT_AUDIO_INFO0
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT0_AFMT_AUDIO_INFO1
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT0_AFMT_60958_0
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT0_AFMT_60958_1
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT0_AFMT_AUDIO_CRC_CONTROL
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT0_AFMT_RAMP_CONTROL0
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT0_AFMT_RAMP_CONTROL1
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT0_AFMT_RAMP_CONTROL2
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT0_AFMT_RAMP_CONTROL3
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT0_AFMT_60958_2
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT0_AFMT_AUDIO_CRC_RESULT
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT0_AFMT_STATUS
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT0_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT0_AFMT_INFOFRAME_CONTROL0
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT0_AFMT_AUDIO_SRC_CONTROL
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT0_AFMT_MEM_PWR
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig1_afmt_afmt_dispdec
+//AFMT1_AFMT_ACP
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT1_AFMT_VBI_PACKET_CONTROL
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT1_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT1_AFMT_AUDIO_INFO0
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT1_AFMT_AUDIO_INFO1
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT1_AFMT_60958_0
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT1_AFMT_60958_1
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT1_AFMT_AUDIO_CRC_CONTROL
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT1_AFMT_RAMP_CONTROL0
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT1_AFMT_RAMP_CONTROL1
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT1_AFMT_RAMP_CONTROL2
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT1_AFMT_RAMP_CONTROL3
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT1_AFMT_60958_2
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT1_AFMT_AUDIO_CRC_RESULT
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT1_AFMT_STATUS
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT1_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT1_AFMT_INFOFRAME_CONTROL0
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT1_AFMT_AUDIO_SRC_CONTROL
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT1_AFMT_MEM_PWR
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig2_afmt_afmt_dispdec
+//AFMT2_AFMT_ACP
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT2_AFMT_VBI_PACKET_CONTROL
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT2_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT2_AFMT_AUDIO_INFO0
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT2_AFMT_AUDIO_INFO1
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT2_AFMT_60958_0
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT2_AFMT_60958_1
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT2_AFMT_AUDIO_CRC_CONTROL
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT2_AFMT_RAMP_CONTROL0
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT2_AFMT_RAMP_CONTROL1
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT2_AFMT_RAMP_CONTROL2
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT2_AFMT_RAMP_CONTROL3
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT2_AFMT_60958_2
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT2_AFMT_AUDIO_CRC_RESULT
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT2_AFMT_STATUS
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT2_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT2_AFMT_INFOFRAME_CONTROL0
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT2_AFMT_AUDIO_SRC_CONTROL
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT2_AFMT_MEM_PWR
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig3_afmt_afmt_dispdec
+//AFMT3_AFMT_ACP
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT3_AFMT_VBI_PACKET_CONTROL
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT3_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT3_AFMT_AUDIO_INFO0
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT3_AFMT_AUDIO_INFO1
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT3_AFMT_60958_0
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT3_AFMT_60958_1
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT3_AFMT_AUDIO_CRC_CONTROL
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT3_AFMT_RAMP_CONTROL0
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT3_AFMT_RAMP_CONTROL1
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT3_AFMT_RAMP_CONTROL2
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT3_AFMT_RAMP_CONTROL3
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT3_AFMT_60958_2
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT3_AFMT_AUDIO_CRC_RESULT
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT3_AFMT_STATUS
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT3_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT3_AFMT_INFOFRAME_CONTROL0
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT3_AFMT_AUDIO_SRC_CONTROL
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT3_AFMT_MEM_PWR
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig4_afmt_afmt_dispdec
+//AFMT4_AFMT_ACP
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT4_AFMT_VBI_PACKET_CONTROL
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT4_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT4_AFMT_AUDIO_INFO0
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT4_AFMT_AUDIO_INFO1
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT4_AFMT_60958_0
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT4_AFMT_60958_1
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT4_AFMT_AUDIO_CRC_CONTROL
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT4_AFMT_RAMP_CONTROL0
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT4_AFMT_RAMP_CONTROL1
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT4_AFMT_RAMP_CONTROL2
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT4_AFMT_RAMP_CONTROL3
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT4_AFMT_60958_2
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT4_AFMT_AUDIO_CRC_RESULT
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT4_AFMT_STATUS
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT4_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT4_AFMT_INFOFRAME_CONTROL0
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT4_AFMT_AUDIO_SRC_CONTROL
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT4_AFMT_MEM_PWR
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig0_dme_dme_dispdec
+//DME0_DME_CONTROL
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME0_DME_MEMORY_CONTROL
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
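A brief usage note (illustrative only, not part of the patch): each field in these generated headers comes as a __SHIFT/_MASK pair, and a field value is recovered by masking the register word and shifting down, with the reverse insert for writes. Below is a minimal, hypothetical C sketch of that idiom using the DME0_DME_CONTROL field macros defined above; the plain uint32_t variable stands in for whatever the driver's real register accessors would return.

#include <stdint.h>
#include <stdio.h>

/* Field macros as generated above, repeated so this sketch is self-contained. */
#define DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
#define DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK   0x00000010L

/* Extract a field: mask off its bits, then shift down to bit 0. */
static uint32_t dme_metadata_engine_en_get(uint32_t dme_control)
{
	return (dme_control & DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK) >>
	       DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT;
}

/* Insert a field: clear its bits, then OR in the shifted, masked value. */
static uint32_t dme_metadata_engine_en_set(uint32_t dme_control, uint32_t en)
{
	dme_control &= ~(uint32_t)DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK;
	dme_control |= (en << DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT) &
		       DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK;
	return dme_control;
}

int main(void)
{
	uint32_t reg = 0;  /* stands in for a value read from DME0_DME_CONTROL */

	reg = dme_metadata_engine_en_set(reg, 1);
	printf("METADATA_ENGINE_EN=%u reg=0x%08x\n",
	       (unsigned)dme_metadata_engine_en_get(reg), (unsigned)reg);
	return 0;
}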
+
+// addressBlock: dce_dc_dio_dig0_vpg_vpg_dispdec
+//VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG0_VPG_GENERIC_PACKET_DATA
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG0_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG0_VPG_GENERIC_STATUS
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG0_VPG_MEM_PWR
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG0_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG0_VPG_ISRC1_2_DATA
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG0_VPG_MPEG_INFO0
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG0_VPG_MPEG_INFO1
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
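Similarly illustrative: the VPG generic-packet registers above pair an index field (VPG_GENERIC_DATA_INDEX in VPG_GENERIC_PACKET_ACCESS_CTRL) with a four-byte data window (VPG_GENERIC_PACKET_DATA, bytes 0 through 3), which is the usual indexed-access layout for loading a packet payload a dword at a time. A hypothetical C sketch under that assumption follows; the register offsets and the write_reg() helper are invented for the example and are not taken from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Field macros as generated above, repeated so this sketch is self-contained. */
#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK   0x000000FFL

/* Hypothetical register offsets, for illustration only. */
#define mmVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL 0x0000
#define mmVPG0_VPG_GENERIC_PACKET_DATA        0x0001

/* Stub register write; a real driver would use its own MMIO accessors. */
static void write_reg(uint32_t offset, uint32_t value)
{
	printf("reg[0x%04x] <= 0x%08x\n", (unsigned)offset, (unsigned)value);
}

/* Load a packet payload through the index/data window, 4 bytes per step.
 * Bytes are packed into BYTE0..BYTE3 (bits 0-7, 8-15, 16-23, 24-31). */
static void vpg_load_packet(const uint8_t *payload, size_t len)
{
	for (size_t i = 0; i < len; i += 4) {
		uint32_t word = 0;
		size_t k;

		for (k = 0; k < 4 && i + k < len; k++)
			word |= (uint32_t)payload[i + k] << (8 * k);

		/* Select the dword index, then write the data word. */
		write_reg(mmVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL,
			  ((uint32_t)(i / 4) <<
			   VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT) &
			  VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK);
		write_reg(mmVPG0_VPG_GENERIC_PACKET_DATA, word);
	}
}

int main(void)
{
	const uint8_t payload[] = { 0x83, 0x01, 0x0d, 0x57, 0x10, 0x00 };

	vpg_load_packet(payload, sizeof(payload));
	return 0;
}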
+
+// addressBlock: dce_dc_dio_dig1_dme_dme_dispdec
+//DME1_DME_CONTROL
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME1_DME_MEMORY_CONTROL
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig1_vpg_vpg_dispdec
+//VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG1_VPG_GENERIC_PACKET_DATA
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG1_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG1_VPG_GENERIC_STATUS
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG1_VPG_MEM_PWR
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG1_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG1_VPG_ISRC1_2_DATA
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG1_VPG_MPEG_INFO0
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG1_VPG_MPEG_INFO1
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig2_dme_dme_dispdec
+//DME2_DME_CONTROL
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME2_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME2_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME2_DME_MEMORY_CONTROL
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig2_vpg_vpg_dispdec
+//VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG2_VPG_GENERIC_PACKET_DATA
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG2_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG2_VPG_GENERIC_STATUS
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG2_VPG_MEM_PWR
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG2_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG2_VPG_ISRC1_2_DATA
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG2_VPG_MPEG_INFO0
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG2_VPG_MPEG_INFO1
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig3_dme_dme_dispdec
+//DME3_DME_CONTROL
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME3_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME3_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME3_DME_MEMORY_CONTROL
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig3_vpg_vpg_dispdec
+//VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG3_VPG_GENERIC_PACKET_DATA
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG3_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG3_VPG_GENERIC_STATUS
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG3_VPG_MEM_PWR
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG3_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG3_VPG_ISRC1_2_DATA
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG3_VPG_MPEG_INFO0
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG3_VPG_MPEG_INFO1
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig4_dme_dme_dispdec
+//DME4_DME_CONTROL
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME4_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME4_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME4_DME_MEMORY_CONTROL
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig4_vpg_vpg_dispdec
+//VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG4_VPG_GENERIC_PACKET_DATA
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG4_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG4_VPG_GENERIC_STATUS
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG4_VPG_MEM_PWR
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG4_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG4_VPG_ISRC1_2_DATA
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG4_VPG_MPEG_INFO0
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG4_VPG_MPEG_INFO1
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+//DP_AUX0_AUX_CONTROL
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX0_AUX_SW_CONTROL
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX0_AUX_ARB_CONTROL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX0_AUX_INTERRUPT_CONTROL
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX0_AUX_SW_STATUS
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX0_AUX_LS_STATUS
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX0_AUX_SW_DATA
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX0_AUX_LS_DATA
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX0_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_TX_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL1
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX0_AUX_DPHY_TX_STATUS
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_RX_STATUS
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX0_AUX_PHY_WAKE_CNTL
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+//DP_AUX1_AUX_CONTROL
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX1_AUX_SW_CONTROL
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX1_AUX_ARB_CONTROL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX1_AUX_INTERRUPT_CONTROL
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX1_AUX_SW_STATUS
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX1_AUX_LS_STATUS
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX1_AUX_SW_DATA
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX1_AUX_LS_DATA
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX1_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_TX_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL1
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX1_AUX_DPHY_TX_STATUS
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_RX_STATUS
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX1_AUX_PHY_WAKE_CNTL
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+//DP_AUX2_AUX_CONTROL
+#define DP_AUX2_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX2_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX2_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX2_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX2_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX2_AUX_SW_CONTROL
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX2_AUX_ARB_CONTROL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX2_AUX_INTERRUPT_CONTROL
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX2_AUX_SW_STATUS
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX2_AUX_LS_STATUS
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX2_AUX_SW_DATA
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX2_AUX_LS_DATA
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX2_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_TX_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL1
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX2_AUX_DPHY_TX_STATUS
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_RX_STATUS
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX2_AUX_PHY_WAKE_CNTL
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+//DP_AUX3_AUX_CONTROL
+#define DP_AUX3_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX3_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX3_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX3_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX3_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX3_AUX_SW_CONTROL
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX3_AUX_ARB_CONTROL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX3_AUX_INTERRUPT_CONTROL
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX3_AUX_SW_STATUS
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX3_AUX_LS_STATUS
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX3_AUX_SW_DATA
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX3_AUX_LS_DATA
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX3_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_TX_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL1
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX3_AUX_DPHY_TX_STATUS
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_RX_STATUS
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX3_AUX_PHY_WAKE_CNTL
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+//DP_AUX4_AUX_CONTROL
+#define DP_AUX4_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX4_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX4_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX4_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX4_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX4_AUX_SW_CONTROL
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX4_AUX_ARB_CONTROL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX4_AUX_INTERRUPT_CONTROL
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX4_AUX_SW_STATUS
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX4_AUX_LS_STATUS
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX4_AUX_SW_DATA
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX4_AUX_LS_DATA
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX4_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_TX_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL1
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX4_AUX_DPHY_TX_STATUS
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_RX_STATUS
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX4_AUX_PHY_WAKE_CNTL
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux0_dispdec
+//DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux1_dispdec
+//DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux2_dispdec
+//DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux3_dispdec
+//DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+//DC_I2C_CONTROL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+//DC_I2C_ARBITRATION
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+//DC_I2C_INTERRUPT_CONTROL
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+//DC_I2C_SW_STATUS
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+//DC_I2C_DDC1_HW_STATUS
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC2_HW_STATUS
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC3_HW_STATUS
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC4_HW_STATUS
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC5_HW_STATUS
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC1_SPEED
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC1_SETUP
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC2_SPEED
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC2_SETUP
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC3_SPEED
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC3_SETUP
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC4_SPEED
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC4_SETUP
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC5_SPEED
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC5_SETUP
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_TRANSACTION0
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION2
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION3
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+//DC_I2C_DATA
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+//DC_I2C_EDID_DETECT_CTRL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000FFFFL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00F00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+//DC_I2C_READ_REQUEST_INTERRUPT
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x00000001L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x00000002L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x00000004L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x00000008L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x00000010L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x00000020L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x00000040L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x00000080L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x00000100L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x00000200L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x00000400L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x00000800L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x00001000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x00002000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x00004000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x00008000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x00010000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x00020000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x00040000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x00080000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x00100000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x00200000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x00400000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x00800000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x01000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x02000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x04000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x08000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+//DIO_DCN_STATUS
+#define DIO_DCN_STATUS__DCN_ACTIVE__SHIFT 0x0
+#define DIO_DCN_STATUS__DCN_ACTIVE_MASK 0x00000001L
+//DIO_SCRATCH0
+#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0
+#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL
+//DIO_SCRATCH1
+#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0
+#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL
+//DIO_SCRATCH2
+#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0
+#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL
+//DIO_SCRATCH3
+#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0
+#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL
+//DIO_SCRATCH4
+#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0
+#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL
+//DIO_SCRATCH5
+#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0
+#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL
+//DIO_SCRATCH6
+#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0
+#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL
+//DIO_SCRATCH7
+#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0
+#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL
+//DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x1
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x3
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x4
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x5
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x6
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000002L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000008L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000010L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000020L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000040L
+//DIO_MEM_PWR_STATUS
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L
+//DIO_MEM_PWR_CTRL
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L
+//DIO_MEM_PWR_CTRL2
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L
+//DIO_CLK_CNTL
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL__SHIFT 0x0
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L
+//DIO_POWER_MANAGEMENT_CNTL
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+//DIO_HDMI_RXSTATUS_TIMER_CONTROL
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+//DIO_PSP_INTERRUPT_STATUS
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_PSP_INTERRUPT_CLEAR
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR_MASK 0x00000001L
+//DIO_STATUS
+#define DIO_STATUS__DIO_EN__SHIFT 0x0
+#define DIO_STATUS__DIO_EN_MASK 0x00000001L
+//DIO_LINKA_CNTL
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKB_CNTL
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKC_CNTL
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKD_CNTL
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKE_CNTL
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKF_CNTL
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+
+
+// addressBlock: dce_dc_dio_dig_stream_mapper_dispdec
+//DIG0_STREAM_MAPPER_CONTROL
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG1_STREAM_MAPPER_CONTROL
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG2_STREAM_MAPPER_CONTROL
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG3_STREAM_MAPPER_CONTROL
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG4_STREAM_MAPPER_CONTROL
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON18_PERFCOUNTER_CNTL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_CNTL2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_STATE
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON18_PERFMON_CNTL
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON18_PERFMON_CNTL2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON18_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON18_PERFMON_CVALUE_LOW
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON18_PERFMON_HI
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFMON_LOW
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+//DC_GENERICA
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+//DC_GENERICB
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000F00L
+//DCIO_CLOCK_CNTL
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
+//DC_REF_CLK_CNTL
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+//UNIPHYA_LINK_CNTL
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYA_CHANNEL_XBAR_CNTL
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYB_LINK_CNTL
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYB_CHANNEL_XBAR_CNTL
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYC_LINK_CNTL
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYC_CHANNEL_XBAR_CNTL
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYD_CHANNEL_XBAR_CNTL
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYE_CHANNEL_XBAR_CNTL
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//DCIO_WRCMD_DELAY
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x18
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xFF000000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+//DCIO_SPARE
+#define DCIO_SPARE__DCIO_SPARE__SHIFT 0x0
+#define DCIO_SPARE__DCIO_SPARE_MASK 0xFFFFFFFFL
+//INTERCEPT_STATE
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE__SHIFT 0x0
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE__SHIFT 0x1
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE__SHIFT 0x2
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE__SHIFT 0x4
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE__SHIFT 0x5
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE__SHIFT 0x6
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE__SHIFT 0x7
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE__SHIFT 0x8
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE__SHIFT 0x9
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE__SHIFT 0xa
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE_MASK 0x00000001L
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE_MASK 0x00000002L
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE_MASK 0x00000004L
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE_MASK 0x00000010L
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE_MASK 0x00000020L
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE_MASK 0x00000040L
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE_MASK 0x00000080L
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE_MASK 0x00000100L
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE_MASK 0x00000200L
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE_MASK 0x00000400L
+//DCIO_PATTERN_GEN_PAT
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT__SHIFT 0x0
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT_MASK 0xFFFFFFFFL
+//DCIO_PATTERN_GEN_EN
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN__SHIFT 0x0
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN_MASK 0x00000001L
+//DCIO_BL_PWM_FRAME_START_DISP_SEL
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL__SHIFT 0x0
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL__SHIFT 0x4
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL_MASK 0x00000007L
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL_MASK 0x00000070L
+//DCIO_GSL_GENLK_PAD_CNTL
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+//DCIO_GSL_SWAPLOCK_PAD_CNTL
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+//DCIO_SOFT_RESET
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xe
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET__SHIFT 0x11
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x00000001L
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x00000002L
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x00000004L
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x00000008L
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x00000010L
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x00000020L
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x00000040L
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000100L
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000200L
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000400L
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000800L
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00001000L
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00002000L
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x00004000L
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET_MASK 0x00010000L
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET_MASK 0x00020000L
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET_MASK 0x00100000L
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+//DC_GPIO_GENERIC_MASK
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN__SHIFT 0x1c
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x0000000CL
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x000000C0L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000C00L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x0000C000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x000C0000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00C00000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x0C000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN_MASK 0xF0000000L
+//DC_GPIO_GENERIC_A
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+//DC_GPIO_GENERIC_EN
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+//DC_GPIO_GENERIC_Y
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+//DC_GPIO_DDC1_MASK
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC1_A
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC1_EN
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC1_Y
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC2_MASK
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC2_A
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC2_EN
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC2_Y
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC3_MASK
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC3_A
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC3_EN
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC3_Y
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC4_MASK
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC4_A
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC4_EN
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC4_Y
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC5_MASK
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC5_A
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC5_EN
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC5_Y
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDCVGA_MASK
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY__SHIFT 0x4
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY_MASK 0x00000010L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDCVGA_A
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+//DC_GPIO_DDCVGA_EN
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+//DC_GPIO_DDCVGA_Y
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+//DC_GPIO_GENLK_MASK
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xc
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1c
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000030L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00003000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00300000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x30000000L
+//DC_GPIO_GENLK_A
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+//DC_GPIO_GENLK_EN
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+//DC_GPIO_GENLK_Y
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+//DC_GPIO_HPD_MASK
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+//DC_GPIO_HPD_A
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+//DC_GPIO_HPD_EN
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x00000020L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x00000400L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x00020000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x00040000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x00100000L
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x00200000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x00400000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x02000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000L
+//DC_GPIO_HPD_Y
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+//DC_GPIO_DRIVE_STRENGTH_S0
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0_MASK 0x00000800L
+//DC_GPIO_DRIVE_STRENGTH_S1
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1_MASK 0x00000800L
+//DC_GPIO_PWRSEQ0_EN
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+//DC_GPIO_PAD_STRENGTH_1
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+//DC_GPIO_PAD_STRENGTH_2
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x00000700L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x00007000L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xC0000000L
+//PHY_AUX_CNTL
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL__SHIFT 0x14
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL_MASK 0x0000C000L
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL_MASK 0x00030000L
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL_MASK 0x000C0000L
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL_MASK 0x00300000L
+//DC_GPIO_DRIVE_TXIMPSEL
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL__SHIFT 0x0
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL__SHIFT 0x1
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL__SHIFT 0x2
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL__SHIFT 0x3
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL__SHIFT 0x4
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL__SHIFT 0x5
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL__SHIFT 0x6
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL__SHIFT 0x8
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL__SHIFT 0x9
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL__SHIFT 0xa
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL__SHIFT 0xb
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL__SHIFT 0xc
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL__SHIFT 0xd
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL__SHIFT 0xe
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL__SHIFT 0xf
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL__SHIFT 0x10
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL__SHIFT 0x11
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL_MASK 0x00000001L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL_MASK 0x00000002L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL_MASK 0x00000004L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL_MASK 0x00000008L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL_MASK 0x00000010L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL_MASK 0x00000020L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL_MASK 0x00000040L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL_MASK 0x00000100L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL_MASK 0x00000200L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL_MASK 0x00000400L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL_MASK 0x00000800L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL_MASK 0x00001000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL_MASK 0x00002000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL_MASK 0x00004000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL_MASK 0x00008000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL_MASK 0x00010000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL_MASK 0x00020000L
+//DC_GPIO_PWRSEQ1_EN
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+//DC_GPIO_TX12_EN
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x00000008L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x00000010L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x00000020L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x00000040L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x00000080L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x00000100L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x00000200L
+//DC_GPIO_AUX_CTRL_0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL_MASK 0x00003000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL_MASK 0xC0000000L
+//DC_GPIO_AUX_CTRL_1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00001800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0xC0000000L
+//DC_GPIO_AUX_CTRL_2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL_MASK 0x40000000L
+//DC_GPIO_RXEN
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN__SHIFT 0x0
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN__SHIFT 0x1
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN__SHIFT 0x2
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN__SHIFT 0x3
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN__SHIFT 0x4
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN__SHIFT 0x5
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN__SHIFT 0x6
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN__SHIFT 0x8
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN__SHIFT 0x9
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN__SHIFT 0xa
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN__SHIFT 0xb
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN__SHIFT 0xc
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN__SHIFT 0xd
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN__SHIFT 0xe
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN__SHIFT 0xf
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN__SHIFT 0x10
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN__SHIFT 0x11
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN__SHIFT 0x12
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN__SHIFT 0x13
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN_MASK 0x00000001L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN_MASK 0x00000002L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN_MASK 0x00000004L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN_MASK 0x00000008L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN_MASK 0x00000010L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN_MASK 0x00000020L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN_MASK 0x00000040L
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN_MASK 0x00000100L
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN_MASK 0x00000200L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN_MASK 0x00000400L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN_MASK 0x00000800L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN_MASK 0x00001000L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN_MASK 0x00002000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN_MASK 0x00004000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN_MASK 0x00008000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN_MASK 0x00010000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN_MASK 0x00020000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN_MASK 0x00040000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN_MASK 0x00080000L
+//DC_GPIO_PULLUPEN
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN__SHIFT 0xf
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN__SHIFT 0x10
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN__SHIFT 0x11
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN__SHIFT 0x12
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN__SHIFT 0x13
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN_MASK 0x00008000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN_MASK 0x00010000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN_MASK 0x00020000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN_MASK 0x00040000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN_MASK 0x00080000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy4_dispdec
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_pwrseq0_dispdec_pwrseq_dispdec
+//PWRSEQ0_DC_GPIO_PWRSEQ_EN
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_CTRL
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_MASK
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_A_Y
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+//PWRSEQ0_PANEL_PWRSEQ_CNTL
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+//PWRSEQ0_PANEL_PWRSEQ_STATE
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+//PWRSEQ0_PANEL_PWRSEQ_DELAY1
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+//PWRSEQ0_PANEL_PWRSEQ_DELAY2
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+//PWRSEQ0_PANEL_PWRSEQ_REF_DIV1
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+//PWRSEQ0_BL_PWM_CNTL
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+//PWRSEQ0_BL_PWM_CNTL2
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+//PWRSEQ0_BL_PWM_PERIOD_CNTL
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+//PWRSEQ0_BL_PWM_GRP1_REG_LOCK
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//PWRSEQ0_PANEL_PWRSEQ_REF_DIV2
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+//PWRSEQ0_PWRSEQ_SPARE
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_pwrseq1_dispdec_pwrseq_dispdec
+//PWRSEQ1_DC_GPIO_PWRSEQ_EN
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_CTRL
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_MASK
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_A_Y
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+//PWRSEQ1_PANEL_PWRSEQ_CNTL
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+//PWRSEQ1_PANEL_PWRSEQ_STATE
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+//PWRSEQ1_PANEL_PWRSEQ_DELAY1
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+//PWRSEQ1_PANEL_PWRSEQ_DELAY2
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+//PWRSEQ1_PANEL_PWRSEQ_REF_DIV1
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+//PWRSEQ1_BL_PWM_CNTL
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+//PWRSEQ1_BL_PWM_CNTL2
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+//PWRSEQ1_BL_PWM_PERIOD_CNTL
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+//PWRSEQ1_BL_PWM_GRP1_REG_LOCK
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//PWRSEQ1_PANEL_PWRSEQ_REF_DIV2
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+//PWRSEQ1_PWRSEQ_SPARE
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+//DSCC0_DSCC_CONFIG0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC0_DSCC_CONFIG1
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC0_DSCC_STATUS
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC0_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC0_DSCC_PPS_CONFIG0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG1
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG2
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG3
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG4
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG5
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG6
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC0_DSCC_PPS_CONFIG7
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG8
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG9
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG11
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG12
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG13
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG14
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG15
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG16
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG17
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG18
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG19
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG20
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG21
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG22
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC0_DSCC_MEM_POWER_CONTROL
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_MAX_ABS_ERROR0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC0_DSCC_MAX_ABS_ERROR1
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+//DSCCIF0_DSCCIF_CONFIG0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF0_DSCCIF_CONFIG1
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+//DSC_TOP0_DSC_TOP_CONTROL
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
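/*
 * Illustrative sketch only (not part of the generated header or of this
 * patch): how a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair such as
 * the DSC_TOP0_DSC_TOP_CONTROL definitions above is conventionally used
 * to read or update one field of a 32-bit register value.  The helper
 * names and the uint32_t plumbing below are hypothetical; in the driver
 * itself this mask-and-shift pattern is normally reached through its
 * REG_GET_FIELD()/REG_SET_FIELD() style helpers rather than open-coded.
 */
#include <stdint.h>

static inline uint32_t dsc_top0_clock_en_get(uint32_t reg_val)
{
	/* Isolate the field bits, then shift them down to bit 0. */
	return (reg_val & DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK) >>
	       DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT;
}

static inline uint32_t dsc_top0_clock_en_set(uint32_t reg_val, uint32_t en)
{
	/* Clear the field, then OR in the new value at the field offset. */
	reg_val &= ~DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK;
	reg_val |= (en << DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT) &
		   DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK;
	return reg_val;
}
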
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON19_PERFCOUNTER_CNTL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_CNTL2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_STATE
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON19_PERFMON_CNTL
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON19_PERFMON_CNTL2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON19_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON19_PERFMON_CVALUE_LOW
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON19_PERFMON_HI
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFMON_LOW
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+//DSCC1_DSCC_CONFIG0
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC1_DSCC_CONFIG1
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC1_DSCC_STATUS
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC1_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC1_DSCC_PPS_CONFIG0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG1
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG2
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG3
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG4
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG5
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG6
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC1_DSCC_PPS_CONFIG7
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG8
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG9
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG11
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG12
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG13
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG14
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG15
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG16
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG17
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG18
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG19
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG20
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG21
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG22
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC1_DSCC_MEM_POWER_CONTROL
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_MAX_ABS_ERROR0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC1_DSCC_MAX_ABS_ERROR1
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+//DSCCIF1_DSCCIF_CONFIG0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF1_DSCCIF_CONFIG1
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+//DSC_TOP1_DSC_TOP_CONTROL
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP1_DSC_DEBUG_CONTROL
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON20_PERFCOUNTER_CNTL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_CNTL2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_STATE
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON20_PERFMON_CNTL
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON20_PERFMON_CNTL2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON20_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON20_PERFMON_CVALUE_LOW
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON20_PERFMON_HI
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFMON_LOW
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+//DSCC2_DSCC_CONFIG0
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC2_DSCC_CONFIG1
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC2_DSCC_STATUS
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC2_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC2_DSCC_PPS_CONFIG0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG1
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG2
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG3
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG4
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG5
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG6
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC2_DSCC_PPS_CONFIG7
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG8
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG9
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG11
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG12
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG13
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG14
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG15
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG16
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG17
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG18
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG19
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG20
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG21
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG22
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC2_DSCC_MEM_POWER_CONTROL
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_MAX_ABS_ERROR0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC2_DSCC_MAX_ABS_ERROR1
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+//DSCCIF2_DSCCIF_CONFIG0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF2_DSCCIF_CONFIG1
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+//DSC_TOP2_DSC_TOP_CONTROL
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP2_DSC_DEBUG_CONTROL
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON21_PERFCOUNTER_CNTL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_CNTL2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_STATE
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON21_PERFMON_CNTL
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON21_PERFMON_CNTL2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON21_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON21_PERFMON_CVALUE_LOW
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON21_PERFMON_HI
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFMON_LOW
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+//DSCC3_DSCC_CONFIG0
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC3_DSCC_CONFIG1
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC3_DSCC_STATUS
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC3_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC3_DSCC_PPS_CONFIG0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG1
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG2
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG3
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG4
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG5
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG6
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC3_DSCC_PPS_CONFIG7
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG8
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG9
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG11
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG12
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG13
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG14
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG15
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG16
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG17
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG18
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG19
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG20
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG21
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG22
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC3_DSCC_MEM_POWER_CONTROL
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_MAX_ABS_ERROR0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC3_DSCC_MAX_ABS_ERROR1
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+//DSCCIF3_DSCCIF_CONFIG0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF3_DSCCIF_CONFIG1
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+//DSC_TOP3_DSC_TOP_CONTROL
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP3_DSC_DEBUG_CONTROL
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON22_PERFCOUNTER_CNTL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_CNTL2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_STATE
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON22_PERFMON_CNTL
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON22_PERFMON_CNTL2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON22_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON22_PERFMON_CVALUE_LOW
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON22_PERFMON_HI
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFMON_LOW
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+//HPO_TOP_CLOCK_CONTROL
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS__SHIFT 0x1
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS__SHIFT 0x4
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS__SHIFT 0x5
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS__SHIFT 0x8
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS__SHIFT 0x9
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS__SHIFT 0xc
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS__SHIFT 0xd
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS__SHIFT 0x10
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS__SHIFT 0x11
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS__SHIFT 0x12
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS__SHIFT 0x13
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS__SHIFT 0x14
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS__SHIFT 0x15
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL__SHIFT 0x18
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS_MASK 0x00000002L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS_MASK 0x00000010L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS_MASK 0x00000020L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS_MASK 0x00000100L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS_MASK 0x00000200L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS_MASK 0x00001000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS_MASK 0x00002000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS_MASK 0x00010000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS_MASK 0x00020000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS_MASK 0x00040000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS_MASK 0x00080000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS_MASK 0x00100000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS_MASK 0x00200000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL_MASK 0xFF000000L
+//HPO_TOP_HW_CONTROL
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN__SHIFT 0x0
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_mapper_dispdec
+//DP_STREAM_MAPPER_CONTROL0
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL1
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL2
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL3
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON23_PERFCOUNTER_CNTL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_CNTL2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_STATE
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON23_PERFMON_CNTL
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON23_PERFMON_CNTL2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON23_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON23_PERFMON_CVALUE_LOW
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON23_PERFMON_HI
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFMON_LOW
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_hdmi_link_enc0_dispdec
+//HDMI_LINK_ENC_CONTROL
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE__SHIFT 0x0
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET__SHIFT 0x4
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE_MASK 0x00000001L
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET_MASK 0x00000010L
+//HDMI_LINK_ENC_CLK_CTRL
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK__SHIFT 0x1
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK_MASK 0x00000002L
+
+
+// addressBlock: dce_dc_hpo_hdmi_frl_enc0_dispdec
+//HDMI_FRL_ENC_CONFIG
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE__SHIFT 0x1
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE__SHIFT 0x2
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN__SHIFT 0x10
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN__SHIFT 0x14
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT_MASK 0x00000001L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE_MASK 0x00000002L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE_MASK 0x00000004L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN_MASK 0x000F0000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN_MASK 0x00F00000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN_MASK 0x0F000000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN_MASK 0xF0000000L
+//HDMI_FRL_ENC_CONFIG2
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD__SHIFT 0xc
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE__SHIFT 0x19
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL__SHIFT 0x1a
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS__SHIFT 0x1d
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS__SHIFT 0x1e
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_MASK 0x000001FFL
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD_MASK 0x001FF000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN_MASK 0x01000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE_MASK 0x02000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL_MASK 0x0C000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET_MASK 0x10000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS_MASK 0x20000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS_MASK 0x40000000L
+//HDMI_FRL_ENC_METER_BUFFER_STATUS
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL__SHIFT 0x0
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET__SHIFT 0x1f
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL_MASK 0x0000007FL
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET_MASK 0x80000000L
+//HDMI_FRL_ENC_MEM_CTRL
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dispdec
+//HDMI_STREAM_ENC_CLOCK_CONTROL
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK_MASK 0x00001000L
+//HDMI_STREAM_ENC_INPUT_MUX_CONTROL
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL_MASK 0x00000007L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING_MASK 0x00000300L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE_MASK 0x00003000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE_MASK 0x00030000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC__SHIFT 0x5
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL_MASK 0x0000001FL
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC_MASK 0x00000020L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_afmt_afmt_dispdec
+//AFMT5_AFMT_ACP
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT5_AFMT_VBI_PACKET_CONTROL
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT5_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT5_AFMT_AUDIO_INFO0
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT5_AFMT_AUDIO_INFO1
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT5_AFMT_60958_0
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT5_AFMT_60958_1
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT5_AFMT_AUDIO_CRC_CONTROL
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT5_AFMT_RAMP_CONTROL0
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT5_AFMT_RAMP_CONTROL1
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT5_AFMT_RAMP_CONTROL2
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT5_AFMT_RAMP_CONTROL3
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT5_AFMT_60958_2
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT5_AFMT_AUDIO_CRC_RESULT
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT5_AFMT_STATUS
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT5_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT5_AFMT_INFOFRAME_CONTROL0
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT5_AFMT_AUDIO_SRC_CONTROL
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT5_AFMT_MEM_PWR
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dme_dme_dispdec
+//DME5_DME_CONTROL
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME5_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME5_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME5_DME_MEMORY_CONTROL
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_vpg_vpg_dispdec
+//VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG5_VPG_GENERIC_PACKET_DATA
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG5_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG5_VPG_GENERIC_STATUS
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG5_VPG_MEM_PWR
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG5_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG5_VPG_ISRC1_2_DATA
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG5_VPG_MPEG_INFO0
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG5_VPG_MPEG_INFO1
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_tb_enc0_dispdec
+//HDMI_TB_ENC_CONTROL
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE__SHIFT 0x8
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE_MASK 0x00000100L
+//HDMI_TB_ENC_PIXEL_FORMAT
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x8
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING__SHIFT 0x10
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE__SHIFT 0x18
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH_MASK 0x00000300L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING_MASK 0x00030000L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE_MASK 0x03000000L
+//HDMI_TB_ENC_PACKET_CONTROL
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE__SHIFT 0x8
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW__SHIFT 0xc
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR__SHIFT 0x10
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE_MASK 0x0000001FL
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE_MASK 0x00000300L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW_MASK 0x00001000L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR_MASK 0x00010000L
+//HDMI_TB_ENC_ACR_PACKET_CONTROL
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//HDMI_TB_ENC_VBI_PACKET_CONTROL1
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE__SHIFT 0x6
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE__SHIFT 0x9
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE__SHIFT 0xe
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE_MASK 0x00000040L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE_MASK 0x00000200L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE_MASK 0x00004000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x7FFF0000L
+//HDMI_TB_ENC_VBI_PACKET_CONTROL2
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE_MASK 0x7FFF0000L
+//HDMI_TB_ENC_GC_CONTROL
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE__SHIFT 0x0
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN__SHIFT 0x1e
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN_MASK 0x40000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x08000000L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//HDMI_TB_ENC_GENERIC_PACKET0_1_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET2_3_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET4_5_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET6_7_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET8_9_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET10_11_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET12_13_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET14_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP_MASK 0x00008000L
+//HDMI_TB_ENC_DB_CONTROL
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+//HDMI_TB_ENC_ACR_32_0
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_32_1
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_44_0
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_44_1
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_48_0
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_48_1
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_STATUS_0
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_STATUS_1
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//HDMI_TB_ENC_BUFFER_CONTROL
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x0
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x1
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x8
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x18
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x0000FF00L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x1F000000L
+//HDMI_TB_ENC_MEM_CTRL
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+//HDMI_TB_ENC_METADATA_PACKET_CONTROL
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//HDMI_TB_ENC_H_ACTIVE_BLANK
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK_MASK 0x7FFF0000L
+//HDMI_TB_ENC_HC_ACTIVE_BLANK
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK_MASK 0x7FFF0000L
+//HDMI_TB_ENC_CRC_CNTL
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN__SHIFT 0x1
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE__SHIFT 0x8
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL__SHIFT 0xa
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE__SHIFT 0x11
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE_MASK 0x00000300L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL_MASK 0x00000C00L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN_MASK 0x00010000L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE_MASK 0x00060000L
+//HDMI_TB_ENC_CRC_RESULT_0
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0_MASK 0x0000FFFFL
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1_MASK 0xFFFF0000L
+//HDMI_TB_ENC_ENCRYPTION_CONTROL
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE__SHIFT 0x4
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE_MASK 0x00000010L
+//HDMI_TB_ENC_MODE
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE__SHIFT 0x0
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK__SHIFT 0x8
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE_MASK 0x00000003L
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK_MASK 0x00000100L
+//HDMI_TB_ENC_INPUT_FIFO_STATUS
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR__SHIFT 0x0
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR_MASK 0x00000001L
+//HDMI_TB_ENC_CRC_RESULT_1
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dispdec
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_apg_apg_dispdec
+//APG0_APG_CONTROL
+#define APG0_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG0_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG0_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG0_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG0_APG_CONTROL2
+#define APG0_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG0_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG0_APG_DBG_GEN_CONTROL
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG0_APG_PACKET_CONTROL
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG0_APG_AUDIO_CRC_CONTROL
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG0_APG_AUDIO_CRC_CONTROL2
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG0_APG_AUDIO_CRC_RESULT
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG0_APG_STATUS
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG0_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG0_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG0_APG_STATUS2
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG0_APG_MEM_PWR
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG0_APG_SPARE
+#define APG0_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG0_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dme_dme_dispdec
+//DME6_DME_CONTROL
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME6_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME6_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME6_DME_MEMORY_CONTROL
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_vpg_vpg_dispdec
+//VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG6_VPG_GENERIC_PACKET_DATA
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG6_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG6_VPG_GENERIC_STATUS
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG6_VPG_MEM_PWR
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG6_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG6_VPG_ISRC1_2_DATA
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG6_VPG_MPEG_INFO0
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG6_VPG_MPEG_INFO1
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc0_dispdec
+//DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dispdec
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_apg_apg_dispdec
+//APG1_APG_CONTROL
+#define APG1_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG1_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG1_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG1_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG1_APG_CONTROL2
+#define APG1_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG1_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG1_APG_DBG_GEN_CONTROL
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG1_APG_PACKET_CONTROL
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG1_APG_AUDIO_CRC_CONTROL
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG1_APG_AUDIO_CRC_CONTROL2
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG1_APG_AUDIO_CRC_RESULT
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG1_APG_STATUS
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG1_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG1_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG1_APG_STATUS2
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG1_APG_MEM_PWR
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG1_APG_SPARE
+#define APG1_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG1_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dme_dme_dispdec
+//DME7_DME_CONTROL
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME7_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME7_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME7_DME_MEMORY_CONTROL
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_vpg_vpg_dispdec
+//VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG7_VPG_GENERIC_PACKET_DATA
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG7_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG7_VPG_GENERIC_STATUS
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG7_VPG_MEM_PWR
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG7_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG7_VPG_ISRC1_2_DATA
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG7_VPG_MPEG_INFO0
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG7_VPG_MPEG_INFO1
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc1_dispdec
+//DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dispdec
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_apg_apg_dispdec
+//APG2_APG_CONTROL
+#define APG2_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG2_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG2_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG2_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG2_APG_CONTROL2
+#define APG2_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG2_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG2_APG_DBG_GEN_CONTROL
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG2_APG_PACKET_CONTROL
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG2_APG_AUDIO_CRC_CONTROL
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG2_APG_AUDIO_CRC_CONTROL2
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG2_APG_AUDIO_CRC_RESULT
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG2_APG_STATUS
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG2_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG2_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG2_APG_STATUS2
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG2_APG_MEM_PWR
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG2_APG_SPARE
+#define APG2_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG2_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dme_dme_dispdec
+//DME8_DME_CONTROL
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME8_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME8_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME8_DME_MEMORY_CONTROL
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_vpg_vpg_dispdec
+//VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG8_VPG_GENERIC_PACKET_DATA
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG8_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG8_VPG_GENERIC_STATUS
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG8_VPG_MEM_PWR
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG8_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG8_VPG_ISRC1_2_DATA
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG8_VPG_MPEG_INFO0
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG8_VPG_MPEG_INFO1
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
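For reference only (not part of this patch): the generated __SHIFT/_MASK pairs above are normally consumed with a read-modify-write pattern. The sketch below uses the GSP_PAYLOAD_SIZE field of DP_SYM32_ENC_SDP_GSP_CONTROL0 as an example; set_gsp_payload_size() is a hypothetical helper introduced purely for illustration, and the register read/write plumbing is omitted.

/*
 * Minimal sketch, assuming the field values defined in this header.
 * Not part of the patch; set_gsp_payload_size() is a hypothetical helper.
 */
#include <stdint.h>

#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK   0x00000060L

/* Insert a 2-bit payload-size code into an existing register value. */
static inline uint32_t set_gsp_payload_size(uint32_t reg_val, uint32_t size_code)
{
	/* Clear the field, then OR in the new value, bounded by the mask. */
	reg_val &= ~DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK;
	reg_val |= (size_code << DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT) &
		   DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK;
	return reg_val;
}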
+// addressBlock: dce_dc_hpo_dp_sym32_enc2_dispdec
+//DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dispdec
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_apg_apg_dispdec
+//APG3_APG_CONTROL
+#define APG3_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG3_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG3_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG3_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG3_APG_CONTROL2
+#define APG3_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG3_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG3_APG_DBG_GEN_CONTROL
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG3_APG_PACKET_CONTROL
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG3_APG_AUDIO_CRC_CONTROL
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG3_APG_AUDIO_CRC_CONTROL2
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG3_APG_AUDIO_CRC_RESULT
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG3_APG_STATUS
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG3_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG3_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG3_APG_STATUS2
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG3_APG_MEM_PWR
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG3_APG_SPARE
+#define APG3_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG3_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dme_dme_dispdec
+//DME9_DME_CONTROL
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME9_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME9_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME9_DME_MEMORY_CONTROL
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_vpg_vpg_dispdec
+//VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG9_VPG_GENERIC_PACKET_DATA
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG9_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG9_VPG_GENERIC_STATUS
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG9_VPG_MEM_PWR
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG9_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG9_VPG_ISRC1_2_DATA
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG9_VPG_MPEG_INFO0
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG9_VPG_MPEG_INFO1
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc3_dispdec
+//DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc0_dispdec
+//DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+//DP_LINK_ENC0_DP_LINK_ENC_SPARE
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym320_dispdec
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc1_dispdec
+//DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+//DP_LINK_ENC1_DP_LINK_ENC_SPARE
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym321_dispdec
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+//DCHVM_CTRL0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ__SHIFT 0x0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ_MASK 0x00000001L
+//DCHVM_CTRL1
+#define DCHVM_CTRL1__DUMMY1__SHIFT 0x0
+#define DCHVM_CTRL1__DUMMY1_MASK 0xFFFFFFFFL
+//DCHVM_CLK_CTRL
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS__SHIFT 0x1
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS__SHIFT 0x4
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS__SHIFT 0x5
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE__SHIFT 0x8
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE__SHIFT 0xa
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS__SHIFT 0xc
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS_MASK 0x00000002L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS_MASK 0x00000010L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS_MASK 0x00000020L
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE_MASK 0x00000300L
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE_MASK 0x00000C00L
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS_MASK 0x00001000L
+//DCHVM_MEM_CTRL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS__SHIFT 0x0
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ__SHIFT 0x2
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS__SHIFT 0x4
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS_MASK 0x00000001L
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ_MASK 0x0000000CL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS_MASK 0x00000030L
+//DCHVM_RIOMMU_CTRL0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ__SHIFT 0x0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS__SHIFT 0x1
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ_MASK 0x00000001L
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS_MASK 0x00000002L
+//DCHVM_RIOMMU_STAT0
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE__SHIFT 0x0
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE__SHIFT 0x1
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE_MASK 0x00000001L
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE_MASK 0x00000002L
+
+
+// addressBlock: dce_dc_dlpc_dlpc_dispdec
+//DLPC_ENABLE
+#define DLPC_ENABLE__DLPC_EN__SHIFT 0x0
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN__SHIFT 0x4
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR__SHIFT 0x5
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS__SHIFT 0x6
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN__SHIFT 0x8
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR__SHIFT 0x9
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS__SHIFT 0xa
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN__SHIFT 0xc
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR__SHIFT 0xd
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS__SHIFT 0xe
+#define DLPC_ENABLE__DLPC_EN_MASK 0x00000001L
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN_MASK 0x00000010L
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR_MASK 0x00000020L
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS_MASK 0x00000040L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN_MASK 0x00000100L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR_MASK 0x00000200L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS_MASK 0x00000400L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN_MASK 0x00001000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR_MASK 0x00002000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS_MASK 0x00004000L
+//DLPC_CURRENT_COUNT
+#define DLPC_CURRENT_COUNT__VALUE__SHIFT 0x0
+#define DLPC_CURRENT_COUNT__VALUE_MASK 0xFFFFFFFFL
+//DLPC_OPTC_SNAPSHOT
+#define DLPC_OPTC_SNAPSHOT__VALUE__SHIFT 0x0
+#define DLPC_OPTC_SNAPSHOT__VALUE_MASK 0xFFFFFFFFL
+//DLPC_PWRUP
+#define DLPC_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_PWRUP__VALUE_MASK 0xFFFFFFFFL
+//DLPC_OTG_RESYNC
+#define DLPC_OTG_RESYNC__VALUE__SHIFT 0x0
+#define DLPC_OTG_RESYNC__VALUE_MASK 0xFFFFFFFFL
+//DLPC_DCN_ZSC_LONO_PWRUP
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE_MASK 0xFFFFFFFFL
+//DLPC_SPARE
+#define DLPC_SPARE__SPARE__SHIFT 0x0
+#define DLPC_SPARE__SPARE_MASK 0xFFFFFFFFL
+//DLPC_COUNTER_INIT_VALUE
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE__SHIFT 0x0
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dpia_dpia_mu0_dpiadec
+//DPIA_MU_CLOCK_CTRL
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS__SHIFT 0x8
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS__SHIFT 0xb
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS__SHIFT 0xc
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS__SHIFT 0xd
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS__SHIFT 0xe
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL__SHIFT 0x18
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS_MASK 0x00000100L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS_MASK 0x00000800L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS_MASK 0x00001000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS_MASK 0x00002000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS_MASK 0x00004000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL_MASK 0xFF000000L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT3
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_TPI_STATUS_DPIA_PORT0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT1
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT2
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT3
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_MAX_CREDIT_COUNT
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT_MASK 0x0000003FL
+//DPIA_MU_INTERRUPT_STATUS
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS__SHIFT 0x3
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS__SHIFT 0x6
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS__SHIFT 0x9
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS__SHIFT 0x1f
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS_MASK 0x00000007L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS_MASK 0x00000038L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS_MASK 0x000001C0L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS_MASK 0x00000E00L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS_MASK 0x80000000L
+//DPIA_MU_INTERRUPT_CTRL
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK__SHIFT 0x18
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN_MASK 0x00000001L
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK_MASK 0x01000000L
+//DPIA_MU_LOCAL_INTERRUPT_CTRL
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP__SHIFT 0x14
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS__SHIFT 0x18
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR_MASK 0x000FFFFCL
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP_MASK 0x00100000L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS_MASK 0x01000000L
+//DPIA_MU_LOCAL_INTERRUPT_ACK
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK__SHIFT 0x1
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK_MASK 0x00000002L
+//DPIA_MU_RBBMIF_TIMEOUT_CTRL
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD__SHIFT 0x14
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD_MASK 0xFFF00000L
+//DPIA_MU_RBBMIF_TIMEOUT_CTRL2
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS_MASK 0x00000001L
+//DPIA_MU_RBBMIF_STATUS
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x0
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x1
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x5
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK__SHIFT 0x18
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR__SHIFT 0x1f
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000001L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000006L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR_MASK 0x007FFFE0L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK_MASK 0x01000000L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR_MASK 0x80000000L
+//DPIA_MU_MICROSECOND_REF_CTRL
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+//DPIA_MU_PORT_ADP_STATUS
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS__SHIFT 0x0
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS__SHIFT 0x1
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS__SHIFT 0x2
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS__SHIFT 0x3
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS_MASK 0x00000001L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS_MASK 0x00000002L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS_MASK 0x00000004L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS_MASK 0x00000008L
+//DPIA_GLUE_CTRL
+#define DPIA_GLUE_CTRL__DPIA_IO_EN__SHIFT 0x0
+#define DPIA_GLUE_CTRL__DPIA_IO_EN_MASK 0x00000001L
+//DPIA_DEBUG_CONTROL
+//DPIA_PERF_COUNT_CONTROL0
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL0__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL0__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL0__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL0__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL1
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL1__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL1__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL1__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL1__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL2
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL2__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL2__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL2__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL2__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL3
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL3__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL3__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL3__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL3__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL4
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL4__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL4__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL4__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL4__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL5
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL5__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL5__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL5__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL5__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_INDEX
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT__SHIFT 0x0
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT__SHIFT 0x4
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT_MASK 0x00000007L
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT_MASK 0x00000070L
+//DPIA_PERF_COUNT_DATA_LO
+#define DPIA_PERF_COUNT_DATA_LO__VALUE__SHIFT 0x0
+#define DPIA_PERF_COUNT_DATA_LO__VALUE_MASK 0xFFFFFFFFL
+//DPIA_MU_SPARE
+#define DPIA_MU_SPARE__DPIA_MU_SPARE__SHIFT 0x0
+#define DPIA_MU_SPARE__DPIA_MU_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azendpoint_f2codecind
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007FL
+//AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000FC00L
+//AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azendpoint_descriptorind
+//AUDIO_DESCRIPTOR0
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR1
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR2
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR3
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR4
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR5
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR6
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR7
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR8
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR9
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR10
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR11
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR12
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR13
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+
+
+// addressBlock: azendpoint_sinkinfoind
+//AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID1
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xFFFFFFFFL
+//SINK_DESCRIPTION0
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION1
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION2
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION3
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION4
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION5
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION6
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION7
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION8
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION9
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION10
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION11
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION12
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION13
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION14
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION15
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION16
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION17
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000FFL
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+//AZALIA_INPUT_CRC0_CHANNEL0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL1
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL2
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL3
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL4
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL5
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL6
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL7
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+//AZALIA_INPUT_CRC1_CHANNEL0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL1
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL2
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL3
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL4
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL5
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL6
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL7
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc0resultind
+//AZALIA_CRC0_CHANNEL0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL1
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL2
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL3
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL4
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL5
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL6
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL7
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc1resultind
+//AZALIA_CRC1_CHANNEL0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL1
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL2
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL3
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL4
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL5
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL6
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL7
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azinputendpoint_f2codecind
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+
+
+// addressBlock: azroot_f2codecind
+//AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+
+
+// addressBlock: azf0stream0_streamind
+//AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream1_streamind
+//AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream2_streamind
+//AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream3_streamind
+//AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream4_streamind
+//AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream5_streamind
+//AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream6_streamind
+//AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream7_streamind
+//AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream8_streamind
+//AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream9_streamind
+//AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream10_streamind
+//AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream11_streamind
+//AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream12_streamind
+//AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream13_streamind
+//AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream14_streamind
+//AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream15_streamind
+//AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0endpoint0_endpointind
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
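(Editorial note, not part of the patch: each register above follows the generated-header pattern in which the ..._MASK value selects a field's bits and the ..._SHIFT value positions it within the 32-bit register word. A minimal sketch of how such pairs are consumed is shown below; get_field()/set_field() are hypothetical helper names used only for illustration, though the amdgpu driver provides similar token-pasting helpers such as REG_GET_FIELD/REG_SET_FIELD for exactly this purpose.)

/* Illustrative sketch only -- not part of this diff. */
#include <stdint.h>

/* Extract a field: mask off its bits, then shift them down to bit 0. */
static inline uint32_t get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Update a field: clear its bits, then merge in the new value, shifted into place. */
static inline uint32_t set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
				 uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example using the CONVERTER_FORMAT definitions above: read the channel
 * count, then program a new bits-per-sample code into the same register value. */
static uint32_t example_converter_format(uint32_t reg_val, uint32_t bps_code)
{
	uint32_t channels = get_field(reg_val,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT);

	(void)channels;
	return set_field(reg_val,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK,
		AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT,
		bps_code);
}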
+// addressBlock: azf0endpoint1_endpointind
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint2_endpointind
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
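The endpoint blocks above consist entirely of paired __SHIFT and _MASK macros, one pair per register field, following the usual amdgpu register-header convention: a field is read by masking the register value and shifting it down, and written by clearing the masked bits and OR-ing in the shifted new value. The following minimal C sketch (editorial illustration only, not part of this patch) shows that pattern using the STREAM_ID definitions from the AZF0ENDPOINT2 block; read_reg() and REG_OFFSET are hypothetical stand-ins for the driver's register accessor and the register offset, neither of which appears in this hunk.

#include <stdint.h>

/*
 * Illustrative helpers only -- not part of the patch. read_reg() and
 * REG_OFFSET are hypothetical placeholders for the driver's register
 * accessor and this register's offset, which are not shown here.
 */
extern uint32_t read_reg(uint32_t offset);
#define REG_OFFSET 0x0 /* placeholder offset */

/* Extract a field: mask out the other bits, then shift down. */
static inline uint32_t get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the shifted new value. */
static inline uint32_t set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
				 uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Read the 4-bit STREAM_ID field of the endpoint2 CHANNEL_STREAM_ID register. */
uint32_t read_stream_id(void)
{
	uint32_t v = read_reg(REG_OFFSET);

	return get_field(v,
		AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK,
		AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT);
}
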
+// addressBlock: azf0endpoint3_endpointind
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint4_endpointind
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint5_endpointind
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint6_endpointind
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
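As a minimal sketch of how the __SHIFT/_MASK pairs above are typically consumed, the helpers below extract and insert a bitfield with plain bit arithmetic. The helper names (az_get_field/az_set_field) and the stripe-control usage shown in the trailing comment are illustrative only and are not part of this patch; the MMIO read/write plumbing and the register offsets for these endpoint registers live in companion headers that are not shown in this hunk.

// Illustrative sketch (not part of the patch): generic field accessors built
// on the __SHIFT/_MASK pairs defined above. Pure bit arithmetic only; how the
// 32-bit register value is read or written back is driver plumbing outside
// this hunk. Helper names are hypothetical.
#include <stdint.h>

static inline uint32_t az_get_field(uint32_t reg_val, uint32_t mask,
				    uint32_t shift)
{
	/* isolate the field bits, then right-align them */
	return (reg_val & mask) >> shift;
}

static inline uint32_t az_set_field(uint32_t reg_val, uint32_t mask,
				    uint32_t shift, uint32_t field)
{
	/* clear the field, then merge in the new value, clamped to the mask */
	return (reg_val & ~mask) | ((field << shift) & mask);
}

/*
 * Example use with the endpoint 6 stripe-control register defined above:
 *
 *   uint32_t cap = az_get_field(v,
 *       AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK,
 *       AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT);
 *   v = az_set_field(v,
 *       AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK,
 *       AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT, 1);
 */
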
+// addressBlock: azf0endpoint7_endpointind
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
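The __SHIFT/_MASK pairs above follow the usual register-field convention: the driver builds the macro names from a register prefix and a field name and uses them to extract or update individual bit fields (amdgpu's REG_GET_FIELD/REG_SET_FIELD helpers follow this pattern). The snippet below is only a minimal, self-contained sketch of that pattern, not the driver's own code; the GET_FIELD/SET_FIELD helper macros and the small main() are illustrative, and the four HBR defines are copied verbatim from the block above so the example compiles on its own.

/* Sketch: consuming a __SHIFT/_MASK pair from this header. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the AZF0ENDPOINT7 block above. */
#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L

/* Hypothetical helpers that paste the field tokens the same way the header names them. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t hbr = 0;

	/* Set HBR_ENABLE (bit 4), then read back HBR_CAPABLE (bit 0). */
	hbr = SET_FIELD(hbr, AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
			HBR_ENABLE, 1);
	printf("HBR_CAPABLE=%u reg=0x%08x\n",
	       (unsigned)GET_FIELD(hbr, AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
				   HBR_CAPABLE),
	       hbr);
	return 0;
}
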
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
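The SHIFT/MASK pairs above follow the usual convention for auto-generated register headers: `*_MASK` selects the field's bits in the 32-bit register and `*__SHIFT` moves the field down to bit 0. A minimal, self-contained sketch (not part of the commit) of how such pairs are typically combined is shown below; the register value is simulated and the helper names are illustrative only, while the macro values are copied verbatim from the INFOFRAME definitions above.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the definitions above so this sketch compiles on its own. */
#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT   0x0
#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK     0x00000007L
#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK   0x80000000L

/* Extract a field: mask first, then shift down to bit 0. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the shifted new value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Simulated INFOFRAME register value; real code would go through the driver's MMIO accessors. */
	uint32_t reg = 0;

	reg = set_field(reg,
			AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK,
			AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT,
			2 /* two-channel stream */);
	reg = set_field(reg,
			AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK,
			AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT,
			1 /* mark the infoframe as valid */);

	printf("reg = 0x%08x, channel_count = %u\n", reg,
	       get_field(reg,
			 AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK,
			 AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT));
	return 0;
}
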
+// addressBlock: azf0inputendpoint3_inputendpointind
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
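For single-bit fields such as PRESENCE_DETECT in the PIN_SENSE register above, the shift is not strictly needed: testing against the mask alone is enough. The short sketch below (again not part of the commit, with a simulated register value and an illustrative helper name) shows that pattern; the mask value is copied verbatim from the endpoint3 definitions above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the PIN_SENSE definitions above so the sketch is self-contained. */
#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L

/* A one-bit flag can be tested directly against its mask;
 * the __SHIFT macro only matters for multi-bit fields. */
static bool pin_present(uint32_t pin_sense_reg)
{
	return (pin_sense_reg &
		AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK) != 0;
}

int main(void)
{
	/* Simulated register value; real code would read it from the hardware. */
	uint32_t sense = 0x80000000u;

	printf("presence detect: %d\n", pin_present(sense));
	return 0;
}
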
+// addressBlock: azf0inputendpoint4_inputendpointind
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
index 15e5a65cf492..70ee6be94a9b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
@@ -9776,6 +9776,14 @@
#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
#define regDIG0_DIG_BE_EN_CNTL 0x20bd
#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_HDCP_INT_CONTROL 0x20c0
+#define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG0_HDCP_LINK0_STATUS 0x20c1
+#define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_0 0x20c2
+#define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_1 0x20c3
+#define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG0_TMDS_CNTL 0x20e4
#define regDIG0_TMDS_CNTL_BASE_IDX 2
#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
@@ -10081,6 +10089,12 @@
#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
#define regDIG1_DIG_BE_EN_CNTL 0x21e1
#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_HDCP_INT_CONTROL 0x21e4
+#define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_0 0x21e6
+#define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_1 0x21e7
+#define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG1_TMDS_CNTL 0x2208
#define regDIG1_TMDS_CNTL_BASE_IDX 2
#define regDIG1_TMDS_CONTROL_CHAR 0x2209
@@ -10386,6 +10400,12 @@
#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
#define regDIG2_DIG_BE_EN_CNTL 0x2305
#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_HDCP_INT_CONTROL 0x2308
+#define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_0 0x230a
+#define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_1 0x230b
+#define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG2_TMDS_CNTL 0x232c
#define regDIG2_TMDS_CNTL_BASE_IDX 2
#define regDIG2_TMDS_CONTROL_CHAR 0x232d
@@ -10691,6 +10711,12 @@
#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
#define regDIG3_DIG_BE_EN_CNTL 0x2429
#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_HDCP_INT_CONTROL 0x242c
+#define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_0 0x242e
+#define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_1 0x242f
+#define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG3_TMDS_CNTL 0x2450
#define regDIG3_TMDS_CNTL_BASE_IDX 2
#define regDIG3_TMDS_CONTROL_CHAR 0x2451
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
index 5d9d5fea6e06..e3d841b2e9af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
@@ -2847,6 +2847,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe
@@ -2871,6 +2879,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
index abdb8728156e..d6c02cf815be 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
@@ -9478,6 +9478,8 @@
#define regRLC_GFX_IMU_CMD_BASE_IDX 1
#define regGFX_IMU_RLC_STATUS 0x4054
#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
#define regGFX_IMU_SOC_DATA 0x4059
#define regGFX_IMU_SOC_DATA_BASE_IDX 1
#define regGFX_IMU_SOC_ADDR 0x405a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
index 2bd9f3f1026f..0122a21c50cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
@@ -2261,11 +2261,13 @@
#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
#define SH_MEM_CONFIG__F8_MODE__SHIFT 0x8
+#define SH_MEM_CONFIG__PRECISION_MODE__SHIFT 0x9
#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
#define SH_MEM_CONFIG__F8_MODE_MASK 0x00000100L
+#define SH_MEM_CONFIG__PRECISION_MODE_MASK 0x00000200L
#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
//SP_MFMA_PORTD_RD_CONFIG
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
index c75aee25619e..6f44345277af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -1779,6 +1779,8 @@
#define mmRLC_TTOP_D 0x3105
#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_1 0x2438
+#define mmSPI_STATIC_THREAD_MGMT_2 0x2439
#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index c488d4a50cf4..b2252deabc17 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -203,6 +203,10 @@
#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB0_WR_MISC_CREDIT 0x0058
#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x005b
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x005c
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB0_WRCLI_ASK_PENDING 0x005d
#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB0_WRCLI_GO_PENDING 0x005e
@@ -455,6 +459,10 @@
#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB1_WR_MISC_CREDIT 0x00d8
#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00db
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00dc
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB1_WRCLI_ASK_PENDING 0x00dd
#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB1_WRCLI_GO_PENDING 0x00de
@@ -707,6 +715,10 @@
#define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB2_WR_MISC_CREDIT 0x0158
#define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x015b
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x015c
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB2_WRCLI_ASK_PENDING 0x015d
#define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB2_WRCLI_GO_PENDING 0x015e
@@ -959,6 +971,10 @@
#define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB3_WR_MISC_CREDIT 0x01d8
#define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01db
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01dc
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB3_WRCLI_ASK_PENDING 0x01dd
#define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB3_WRCLI_GO_PENDING 0x01de
@@ -1211,6 +1227,10 @@
#define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB4_WR_MISC_CREDIT 0x0258
#define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x025b
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x025c
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB4_WRCLI_ASK_PENDING 0x025d
#define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB4_WRCLI_GO_PENDING 0x025e
@@ -4793,6 +4813,10 @@
#define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB5_WR_MISC_CREDIT 0x3058
#define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE 0x305b
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x305c
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB5_WRCLI_ASK_PENDING 0x305d
#define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB5_WRCLI_GO_PENDING 0x305e
@@ -5045,6 +5069,10 @@
#define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB6_WR_MISC_CREDIT 0x30d8
#define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE 0x30db
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x30dc
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB6_WRCLI_ASK_PENDING 0x30dd
#define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB6_WRCLI_GO_PENDING 0x30de
@@ -5297,6 +5325,10 @@
#define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB7_WR_MISC_CREDIT 0x3158
#define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE 0x315b
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x315c
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB7_WRCLI_ASK_PENDING 0x315d
#define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB7_WRCLI_GO_PENDING 0x315e
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 2969fbf282b7..5069d2fd467f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -1532,6 +1532,12 @@
//DAGB0_WRCLI_DBUS_GO_PENDING
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB0_DAGB_DLY
#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
@@ -3207,6 +3213,12 @@
//DAGB1_WRCLI_DBUS_GO_PENDING
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB1_DAGB_DLY
#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
@@ -4882,6 +4894,12 @@
//DAGB2_WRCLI_DBUS_GO_PENDING
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB2_DAGB_DLY
#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8
@@ -6557,6 +6575,12 @@
//DAGB3_WRCLI_DBUS_GO_PENDING
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB3_DAGB_DLY
#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8
@@ -8232,6 +8256,12 @@
//DAGB4_WRCLI_DBUS_GO_PENDING
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB4_DAGB_DLY
#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8
@@ -28737,6 +28767,12 @@
//DAGB5_WRCLI_DBUS_GO_PENDING
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB5_DAGB_DLY
#define DAGB5_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB5_DAGB_DLY__CLI__SHIFT 0x8
@@ -30412,6 +30448,12 @@
//DAGB6_WRCLI_DBUS_GO_PENDING
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB6_DAGB_DLY
#define DAGB6_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB6_DAGB_DLY__CLI__SHIFT 0x8
@@ -32087,6 +32129,12 @@
//DAGB7_WRCLI_DBUS_GO_PENDING
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB7_DAGB_DLY
#define DAGB7_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB7_DAGB_DLY__CLI__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
index edc8a793a95d..4dd386b98748 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -234,6 +234,26 @@
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
#define mmIH_STATUS 0x0F88
+
+#define mmDMA_GFX_RB_CNTL 0x3400
+#define mmDMA_GFX_RB_BASE 0x3401
+#define mmDMA_GFX_RB_RPTR 0x3402
+#define mmDMA_GFX_RB_WPTR 0x3403
+#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407
+#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408
+#define mmDMA_GFX_IB_CNTL 0x3409
+#define mmDMA_GFX_IB_RPTR 0x340a
+#define mmDMA_CNTL 0x340b
+#define mmDMA_STATUS_REG 0x340D
+#define mmDMA_TILING_CONFIG 0x342E
+#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define mmDMA_POWER_CNTL 0x342F
+#define mmDMA_CLK_CTRL 0x3430
+#define mmDMA_PG 0x3435
+#define mmDMA_PGFSM_CONFIG 0x3436
+#define mmDMA_PGFSM_WRITE 0x3437
+
#define mmSEM_MAILBOX 0x0F9B
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
#define mmSEM_MAILBOX_CONTROL 0x0F9C
@@ -269,7 +289,4 @@
#define mmVCE_CONFIG 0x0F94
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
-/* from the old sid.h */
-#define mmDMA_TILING_CONFIG 0x342E
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
index 1c540fe136cb..9f7fc2428b69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -823,6 +823,43 @@
#define LX3__RESERVED__SHIFT 0x00000000
#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+
+#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002
+#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003
+#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C
+#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010
+#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F
+
+#define DMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008
+#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L
+#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000
+
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
@@ -1015,6 +1052,10 @@
#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L
+#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005
+#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L
+#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006
#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
index 6b10be61efc3..bdef1f743df7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -41,7 +41,49 @@
#define ixLCAC_MC5_CNTL 0x012B
#define ixLCAC_MC5_OVR_SEL 0x012C
#define ixLCAC_MC5_OVR_VAL 0x012D
+
+#define mmCG_SPLL_FUNC_CNTL 0x0180
+#define mmCG_SPLL_FUNC_CNTL_2 0x0181
+#define mmCG_SPLL_FUNC_CNTL_3 0x0182
+#define mmCG_SPLL_FUNC_CNTL_4 0x0183
+#define mmCG_SPLL_STATUS 0x0185
+#define mmSPLL_CNTL_MODE 0x0186
+#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188
+#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189
+#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B
+#define mmMPLL_BYPASSCLK_SEL 0x0197
+#define mmCG_CLKPIN_CNTL 0x0198
+#define mmCG_CLKPIN_CNTL_2 0x0199
+#define mmTHM_CLK_CNTL 0x019B
+#define mmMISC_CLK_CNTL 0x019C
+#define mmCG_THERMAL_CTRL 0x01C0
+#define mmCG_THERMAL_STATUS 0x01C1
+#define mmCG_THERMAL_INT 0x01C2
+#define mmCG_MULT_THERMAL_CTRL 0x01C4
+#define mmCG_MULT_THERMAL_STATUS 0x01C5
+#define mmCG_FDO_CTRL0 0x01D5
+#define mmCG_FDO_CTRL1 0x01D6
+#define mmCG_FDO_CTRL2 0x01D7
+#define mmCG_TACH_CTRL 0x01DC
+#define mmCG_TACH_STATUS 0x01DD
+#define mmGENERAL_PWRMGT 0x1E0
+#define mmCG_TPC 0x1E1
+#define mmSCLK_PWRMGT_CNTL 0x1E2
+#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6
+#define mmCG_FTV 0x01EF
+#define mmCG_FFCT_0 0x01F0
+#define mmCG_BSP 0x01FF
+#define mmCG_AT 0x0200
+#define mmCG_GIT 0x0201
+#define mmCG_SSP 0x0203
+#define mmCG_DISPLAY_GAP_CNTL 0x020A
+#define mmCG_ULV_CONTROL 0x021E
+#define mmCG_ULV_PARAMETER 0x021F
+#define mmSMC_SCRATCH0 0x0221
+#define mmCG_CAC_CTRL 0x022E
+
#define ixSMC_PC_C 0x80000370
+
#define ixTHM_TMON0_DEBUG 0x03F0
#define ixTHM_TMON0_INT_DATA 0x0380
#define ixTHM_TMON0_RDIL0_DATA 0x0300
@@ -110,6 +152,7 @@
#define ixTHM_TMON1_RDIR7_DATA 0x0337
#define ixTHM_TMON1_RDIR8_DATA 0x0338
#define ixTHM_TMON1_RDIR9_DATA 0x0339
+
#define mmGPIOPAD_A 0x05E7
#define mmGPIOPAD_EN 0x05E8
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
@@ -127,6 +170,7 @@
#define mmGPIOPAD_STRENGTH 0x05E5
#define mmGPIOPAD_SW_INT_STAT 0x05E4
#define mmGPIOPAD_Y 0x05E9
+
#define mmSMC_IND_ACCESS_CNTL 0x008A
#define mmSMC_IND_DATA_0 0x0081
#define mmSMC_IND_DATA 0x0081
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
index 7d3925b7266e..67d3c7e13a48 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -23,10 +23,142 @@
#ifndef SMU_6_0_SH_MASK_H
#define SMU_6_0_SH_MASK_H
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
-#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_AT__CG_R_MASK 0x0000FFFFL
+#define CG_AT__CG_R__SHIFT 0x00000000
+#define CG_AT__CG_L_MASK 0xFFFF0000L
+#define CG_AT__CG_L__SHIFT 0x00000010
+
+#define CG_BSP__BSP_MASK 0x0000FFFFL
+#define CG_BSP__BSP__SHIFT 0x00000000
+#define CG_BSP__BSU_MASK 0x000F0000L
+#define CG_BSP__BSU__SHIFT 0x00000010
+
+#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL
+#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000
+
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008
+
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A
+
+#define CG_FFCT_0__UTC_0_MASK 0x000003FFL
+#define CG_FFCT_0__UTC_0__SHIFT 0x00000000
+#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L
+#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A
+
+#define CG_GIT__CG_GICST_MASK 0x0000FFFFL
+#define CG_GIT__CG_GICST__SHIFT 0x00000000
+#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L
+#define CG_GIT__CG_GIPOT__SHIFT 0x00000010
+
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x007F00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x0000001FF
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x00000200L
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x03FFFFFFL
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009
+
+#define CG_SSP__SST_MASK 0x0000FFFFL
+#define CG_SSP__SST__SHIFT 0x00000000
+#define CG_SSP__SSTU_MASK 0x000F0000L
+#define CG_SSP__SSTU__SHIFT 0x00000010
+
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019
+
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003fe00L
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009
+
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019
+
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000
+
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017
+
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
@@ -195,6 +327,7 @@
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
@@ -243,6 +376,37 @@
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008
+
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001
+#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L
+#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E
+
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
@@ -285,6 +449,7 @@
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
@@ -293,6 +458,8 @@
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
@@ -303,10 +470,25 @@
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004
+
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008
+
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000
+#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L
+#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008
+
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
index 2176548e9203..9778822dd2a0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
@@ -60,5 +60,10 @@
#define mmVCE_VCPU_CACHE_SIZE1 0x800C
#define mmVCE_VCPU_CACHE_SIZE2 0x800E
#define mmVCE_VCPU_CNTL 0x8005
+#define mmVCE_VCPU_SCRATCH7 0x8037
+#define mmVCE_FW_REG_STATUS 0x8384
+#define mmVCE_LMI_FW_PERIODIC_CTRL 0x8388
+#define mmVCE_LMI_FW_START_KEYSEL 0x8386
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
index ea5b26b11cb1..1f82d6f5abde 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
@@ -61,6 +61,8 @@
#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
+#define VCE_SOFT_RESET__FME_SOFT_RESET_MASK 0x00000004L
+#define VCE_SOFT_RESET__FME_SOFT_RESET__SHIFT 0x00000002
#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
@@ -95,5 +97,13 @@
#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK 0x00010000
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_SHIFT 0x00000010
+#define VCE_FW_REG_STATUS__BUSY_MASK 0x0000001
+#define VCE_FW_REG_STATUS__BUSY__SHIFT 0x0000001
+#define VCE_FW_REG_STATUS__PASS_MASK 0x0000008
+#define VCE_FW_REG_STATUS__PASS__SHIFT 0x0000003
+#define VCE_FW_REG_STATUS__DONE_MASK 0x0000800
+#define VCE_FW_REG_STATUS__DONE__SHIFT 0x000000b
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
index e9742d10de1c..3ed10e60afbf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
@@ -779,7 +779,8 @@
#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
#define regVCN_RAS_CNTL 0x02df
#define regVCN_RAS_CNTL_BASE_IDX 1
-
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec
// base address: 0x20f00
@@ -953,6 +954,10 @@
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL 0x067a
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS 0x067b
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 1
#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2 0x067d
#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
@@ -1055,6 +1060,8 @@
#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
#define regJPEG_PERF_BANK_COUNT3 0x072c
#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+#define regJPEG_CORE_RST_CTRL 0x072e
+#define regJPEG_CORE_RST_CTRL_BASE_IDX 1
// addressBlock: aid_uvd0_uvd_pg_dec
@@ -1929,6 +1936,10 @@
#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2 0x003d
#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_CLIENT_STALL 0x003a
+#define regUVD_JMI1_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_CLIENT_CLEAN_STATUS 0x003b
+#define regUVD_JMI1_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec
@@ -1987,6 +1998,10 @@
#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2 0x007d
#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_CLIENT_STALL 0x007a
+#define regUVD_JMI2_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_CLIENT_CLEAN_STATUS 0x007b
+#define regUVD_JMI2_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec
@@ -2045,6 +2060,10 @@
#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2 0x00bd
#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_CLIENT_STALL 0x00ba
+#define regUVD_JMI3_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_CLIENT_CLEAN_STATUS 0x00bb
+#define regUVD_JMI3_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec
@@ -2103,6 +2122,10 @@
#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2 0x00fd
#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_CLIENT_STALL 0x00fa
+#define regUVD_JMI4_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_CLIENT_CLEAN_STATUS 0x00fb
+#define regUVD_JMI4_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec
@@ -2161,6 +2184,10 @@
#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2 0x013d
#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_CLIENT_STALL 0x013a
+#define regUVD_JMI5_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_CLIENT_CLEAN_STATUS 0x013b
+#define regUVD_JMI5_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec
@@ -2219,6 +2246,10 @@
#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2 0x017d
#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_CLIENT_STALL 0x017a
+#define regUVD_JMI6_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_CLIENT_CLEAN_STATUS 0x017b
+#define regUVD_JMI6_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec
@@ -2277,6 +2308,10 @@
#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2 0x01bd
#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_CLIENT_STALL 0x01ba
+#define regUVD_JMI7_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_CLIENT_CLEAN_STATUS 0x01bb
+#define regUVD_JMI7_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: uvdctxind
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
index 14574112c469..72a118b2af69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -1067,7 +1067,13 @@
#define regVCN_FEATURES_BASE_IDX 1
#define regUVD_GPUIOV_STATUS 0x0055
#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15 0x005c
+#define regUVD_RAS_JPEG0_STATUS 0x0059
+#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define regUVD_RAS_JPEG1_STATUS 0x005a
+#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15_BASE_IDX 1
#define regUVD_VERSION 0x005d
#define regUVD_VERSION_BASE_IDX 1
@@ -1147,6 +1153,22 @@
#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+// addressBlock: uvd_mmsch_dec
+// base address: 0x20d2c
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+
+
// addressBlock: uvd_vcn_umsch_dec
// base address: 0x21500
#define regVCN_UMSCH_MES_CNTL 0x0740
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
index 5c119a6b87fb..c78b09d6fbae 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -5714,6 +5714,22 @@
//UVD_GPUIOV_STATUS
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_RAS_VCPU_VCODEC_STATUS
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
+
+//UVD_RAS_JPEG0_STATUS
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_JPEG1_STATUS
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_SCRATCH15
#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
@@ -5929,6 +5945,29 @@
#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+// addressBlock: uvd_mmsch_dec
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+
+
// addressBlock: uvd_vcn_umsch_dec
//VCN_UMSCH_MES_CNTL
#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b78360a71bc9..b344acefc606 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
constant across VGA or non VGA adapter,
@@ -6017,7 +6017,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000
@@ -6460,7 +6460,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
// ulGPUCapInfo
#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
//ulGPUCapInfo[18]=1 indicate the IOMMU is not available
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0160d65f3f5e..3d083010e734 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -183,6 +183,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
+ ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81,
};
enum atom_dp_vs_preemph_def{
@@ -210,7 +211,7 @@ atom_bios_string = "ATOM"
};
*/
-#pragma pack(1) /* BIOS data must use byte aligment*/
+#pragma pack(1) /* BIOS data must use byte alignment*/
enum atombios_image_offset{
OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
@@ -254,8 +255,8 @@ struct atom_rom_header_v2_2
uint16_t subsystem_vendor_id;
uint16_t subsystem_id;
uint16_t pci_info_offset;
- uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position
- uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position
+ uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position
+ uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position
uint16_t reserved;
uint32_t pspdirtableoffset;
};
@@ -452,7 +453,7 @@ struct atom_dtd_format
uint8_t refreshrate;
};
-/* atom_dtd_format.modemiscinfo defintion */
+/* atom_dtd_format.modemiscinfo definition */
enum atom_dtd_format_modemiscinfo{
ATOM_HSYNC_POLARITY = 0x0002,
ATOM_VSYNC_POLARITY = 0x0004,
@@ -677,7 +678,7 @@ struct lcd_info_v2_1
uint32_t reserved1[8];
};
-/* lcd_info_v2_1.panel_misc defintion */
+/* lcd_info_v2_1.panel_misc definition */
enum atom_lcd_info_panel_misc{
ATOM_PANEL_MISC_FPDI =0x0002,
};
@@ -715,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id {
/* gpio_id pre-define id for multiple usage */
/* GPIO use to control PCIE_VDDC in certain SLT board */
PCIE_VDDC_CONTROL_GPIO_PINID = 56,
- /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */
+ /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enable */
PP_AC_DC_SWITCH_GPIO_PINID = 60,
/* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */
VDDC_VRHOT_GPIO_PINID = 61,
@@ -733,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id {
struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
- /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+ /*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -996,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def {
enum atom_display_device_tag_def{
ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display
- ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability
+ ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility
ATOM_DISPLAY_DFP1_SUPPORT = 0x0008,
ATOM_DISPLAY_DFP2_SUPPORT = 0x0080,
ATOM_DISPLAY_DFP3_SUPPORT = 0x0200,
@@ -1010,7 +1011,7 @@ struct atom_display_object_path_v2
{
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view;
uint16_t encoder_recordoffset;
uint16_t extencoder_recordoffset;
@@ -1022,7 +1023,7 @@ struct atom_display_object_path_v2
struct atom_display_object_path_v3 {
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t reserved1; //only on USBC case, otherwise always = 0
uint16_t reserved2; //reserved and always = 0
uint16_t reserved3; //reserved and always = 0
@@ -1713,7 +1714,7 @@ enum atom_system_vbiosmisc_def{
// gpucapinfo
enum atom_system_gpucapinf_def{
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10,
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10,
};
//dpphy_override
@@ -3546,7 +3547,7 @@ struct atom_voltage_object_header_v4{
enum atom_voltage_object_mode
{
VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4
- VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4
+ VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4
VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4
VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4
VOLTAGE_OBJ_EVV = 8,
@@ -3584,7 +3585,7 @@ struct atom_gpio_voltage_object_v4
{
struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode
- uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table
+ uint8_t gpio_entry_num; // indicate the entry numbers of Voltage/Gpio value Look up table
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
@@ -4506,8 +4507,8 @@ struct amd_acpi_description_header{
struct uefi_acpi_vfct{
struct amd_acpi_description_header sheader;
uint8_t tableUUID[16]; //0x24
- uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
- uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
+ uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
uint32_t reserved[4]; //0x3C
};
@@ -4539,7 +4540,7 @@ struct gop_lib1_content {
/*
***************************************************************************
Scratch Register definitions
- Each number below indicates which scratch regiser request, Active and
+ Each number below indicates which scratch register request, Active and
Connect all share the same definitions as display_device_tag defines
***************************************************************************
*/
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index acd1cef61b7c..349544504c93 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -65,6 +65,7 @@ struct single_display_configuration {
uint32_t view_resolution_cy;
enum amd_pp_display_config_type displayconfigtype;
uint32_t vertical_refresh; /* for active display */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
};
#define MAX_NUM_DISPLAY 32
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
index 3a4670bc4449..b98b7ae551b5 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
@@ -48,6 +48,7 @@
#define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence
#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
new file mode 100644
index 000000000000..467897ec2e65
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IRQSRCS_GFX_12_0_0_H__
+#define __IRQSRCS_GFX_12_0_0_H__
+
+#define GFX_12_0_0__SRCID__UTCL2_FAULT 0 // UTCL2 has encountered a fault or retry scenario
+#define GFX_12_0_0__SRCID__UTCL2_DATA_POISONING 1 // UTCL2 for data poisoning
+#define GFX_12_0_0__SRCID__MEM_ACCES_MON 10 // 0x0A EA memory access monitor interrupt
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_RTN_DONE 48 // 0x30 SDMA atomic*_rtn ops complete
+#define GFX_12_0_0__SRCID__SDMA_TRAP 49 // 0x31 Trap
+#define GFX_12_0_0__SRCID__SDMA_SRBMWRITE 50 // 0x32 SRBM write Protection
+#define GFX_12_0_0__SRCID__SDMA_CTXEMPTY 51 // 0x33 Context Empty
+#define GFX_12_0_0__SRCID__SDMA_PREEMPT 52 // 0x34 SDMA New Run List
+#define GFX_12_0_0__SRCID__SDMA_IB_PREEMPT 53 // 0x35 sdma mid - command buffer preempt interrupt
+#define GFX_12_0_0__SRCID__SDMA_DOORBELL_INVALID 54 // 0x36 Doorbell BE invalid
+#define GFX_12_0_0__SRCID__SDMA_QUEUE_HANG 55 // 0x37 Queue hang or Command timeout
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_TIMEOUT 56 // 0x38 SDMA atomic CMPSWAP loop timeout
+#define GFX_12_0_0__SRCID__SDMA_POLL_TIMEOUT 57 // 0x39 SRBM read poll timeout
+#define GFX_12_0_0__SRCID__SDMA_PAGE_TIMEOUT 58 // 0x3A Page retry timeout after UTCL2 return nack = 1
+#define GFX_12_0_0__SRCID__SDMA_PAGE_NULL 59 // 0x3B Page Null from UTCL2 when nack = 2
+#define GFX_12_0_0__SRCID__SDMA_PAGE_FAULT 60 // 0x3C Page Fault Error from UTCL2 when nack = 3
+#define GFX_12_0_0__SRCID__SDMA_VM_HOLE 61 // 0x3D MC or SEM address in VM hole
+#define GFX_12_0_0__SRCID__SDMA_ECC 62 // 0x3E ECC Error
+#define GFX_12_0_0__SRCID__SDMA_FROZEN 63 // 0x3F SDMA Frozen
+#define GFX_12_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
+#define GFX_12_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
+#define GFX_12_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_12_0_0__SRCID__SDMA_FENCE 70 // 0x46 User fence
+#define GFX_12_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
+#define GFX_12_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
+#define GFX_12_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
+#define GFX_12_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
+#define GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR 183 // 0xB7 Bad Opcode Error
+#define GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT 184 // 0xB8 Privileged Register Fault
+#define GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT 185 // 0xB9 Privileged Instr Fault
+#define GFX_12_0_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 // 0xBA Wait Memory Semaphore Fault (Sync Object Fault)
+#define GFX_12_0_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 // 0xBB Context Empty Interrupt
+#define GFX_12_0_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 // 0xBC Context Busy Interrupt
+#define GFX_12_0_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 // 0xC0 CP.ME Wait_Reg_Mem Poll Timeout
+#define GFX_12_0_0__SRCID__CP_SIG_INCOMPLETE 193 // 0xC1 "Surface Probe Fault Signal Incomplete"
+#define GFX_12_0_0__SRCID__CP_PREEMPT_ACK 194 // 0xC2 Preemption Acknowledge
+#define GFX_12_0_0__SRCID__CP_GPF 195 // 0xC3 General Protection Fault (GPF)
+#define GFX_12_0_0__SRCID__CP_GDS_ALLOC_ERROR 196 // 0xC4 GDS Alloc Error
+#define GFX_12_0_0__SRCID__CP_ECC_ERROR 197 // 0xC5 ECC Error
+#define GFX_12_0_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 // 0xC7 Compute query status
+#define GFX_12_0_0__SRCID__CP_VM_DOORBELL 200 // 0xC8 Unattached VM Doorbell Received
+#define GFX_12_0_0__SRCID__CP_FUE_ERROR 201 // 0xC9 ECC FUE Error
+#define GFX_12_0_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 // 0xCA Streaming Perf Monitor Interrupt
+#define GFX_12_0_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 // 0xE8 CRead timeout error
+#define GFX_12_0_0__SRCID__GRBM_REG_GUI_IDLE 233 // 0xE9 Register GUI Idle
+#define GFX_12_0_0__SRCID__SQ_INTERRUPT_ID 239 // 0xEF SQ Interrupt (ttrace wrap, errors)
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index e3e635a31b8a..9aba8596faa7 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -313,9 +313,10 @@ struct kfd2kgd_calls {
void (*get_iq_wait_times)(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
- void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
+ void (*build_dequeue_wait_counts_packet_info)(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
void (*get_cu_occupancy)(struct amdgpu_device *adev,
@@ -330,6 +331,8 @@ struct kfd2kgd_calls {
uint64_t (*hqd_reset)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t inst, unsigned int utimeout);
+ uint32_t (*hqd_sdma_get_doorbell)(struct amdgpu_device *adev,
+ int engine, int queue);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9189dcb65188..2366e68262e6 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v14_0_ip_block;
+enum smu_temp_metric_type {
+ SMU_TEMP_METRIC_BASEBOARD,
+ SMU_TEMP_METRIC_GPUBOARD,
+ SMU_TEMP_METRIC_MAX,
+};
+
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
};
@@ -108,6 +114,8 @@ enum pp_clock_type {
PP_VCLK1,
PP_DCLK,
PP_DCLK1,
+ PP_ISPICLK,
+ PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
@@ -128,6 +136,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_VDDBOARD,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
@@ -153,6 +162,10 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
AMDGPU_PP_SENSOR_VCN_LOAD,
+ AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ AMDGPU_PP_SENSOR_NODEPOWER,
+ AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
};
enum amd_pp_task {
@@ -341,6 +354,7 @@ enum pp_policy_soc_pstate {
#define MAX_CLKS 4
#define NUM_VCN 4
#define NUM_JPEG_ENG 32
+#define NUM_JPEG_ENG_V1 40
#define MAX_XCC 8
#define NUM_XCP 8
struct seq_file;
@@ -376,6 +390,20 @@ struct amdgpu_xcp_metrics_v1_1 {
uint64_t gfx_below_host_limit_acc[MAX_XCC];
};
+struct amdgpu_xcp_metrics_v1_2 {
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
+ uint64_t gfx_low_utilization_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
+};
+
struct amd_pm_funcs {
/* export for dpm on ci and si */
int (*pre_set_power_state)(void *handle);
@@ -414,6 +442,7 @@ struct amd_pm_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+ int (*pause_power_profile)(void *handle, bool pause);
/* export to amdgpu */
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
@@ -425,7 +454,7 @@ struct amd_pm_funcs {
bool gate,
int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
- int (*set_power_limit)(void *handle, uint32_t n);
+ int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
@@ -477,6 +506,9 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
+ bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type);
+ ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
struct pp_smu_wm_range_sets *ranges);
@@ -500,6 +532,110 @@ struct metrics_table_header {
uint8_t content_revision;
};
+enum amdgpu_metrics_attr_id {
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC,
+ AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH,
+ AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST,
+ AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK,
+ AMDGPU_METRICS_ATTR_ID_NUM_PARTITION,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST,
+ AMDGPU_METRICS_ATTR_ID_JPEG_BUSY,
+ AMDGPU_METRICS_ATTR_ID_VCN_BUSY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_MAX,
+};
+
+enum amdgpu_metrics_attr_type {
+ AMDGPU_METRICS_TYPE_U8,
+ AMDGPU_METRICS_TYPE_S8,
+ AMDGPU_METRICS_TYPE_U16,
+ AMDGPU_METRICS_TYPE_S16,
+ AMDGPU_METRICS_TYPE_U32,
+ AMDGPU_METRICS_TYPE_S32,
+ AMDGPU_METRICS_TYPE_U64,
+ AMDGPU_METRICS_TYPE_S64,
+ AMDGPU_METRICS_TYPE_MAX,
+};
+
+enum amdgpu_metrics_attr_unit {
+ /* None */
+ AMDGPU_METRICS_UNIT_NONE,
+ /* MHz */
+ AMDGPU_METRICS_UNIT_CLOCK_1,
+ /* Degree Celsius */
+ AMDGPU_METRICS_UNIT_TEMP_1,
+ /* Watts */
+ AMDGPU_METRICS_UNIT_POWER_1,
+ /* In nanoseconds */
+ AMDGPU_METRICS_UNIT_TIME_1,
+ /* In 10 nanoseconds */
+ AMDGPU_METRICS_UNIT_TIME_2,
+ /* Speed in GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_1,
+ /* Speed in 0.1 GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_2,
+ /* Bandwidth GB/s */
+ AMDGPU_METRICS_UNIT_BW_1,
+ /* Data in KB */
+ AMDGPU_METRICS_UNIT_DATA_1,
+ /* Percentage */
+ AMDGPU_METRICS_UNIT_PERCENT,
+ AMDGPU_METRICS_UNIT_MAX,
+};
+
+#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000
+#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24
+#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000
+#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20
+#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00
+#define AMDGPU_METRICS_ATTR_ID_SHIFT 10
+#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF
+#define AMDGPU_METRICS_ATTR_INST_SHIFT 0
+
+#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \
+ (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \
+ ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \
+ ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst))
+
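For reference, decoding one of these packed attribute words is just the reverse of AMDGPU_METRICS_ENC_ATTR; a minimal sketch, with helper names that are hypothetical and not part of the driver:

/* Hypothetical decode helpers for the packed attribute encoding above */
static inline u32 metrics_attr_unit(u64 enc)
{
	return (enc & AMDGPU_METRICS_ATTR_UNIT_MASK) >> AMDGPU_METRICS_ATTR_UNIT_SHIFT;
}

static inline u32 metrics_attr_id(u64 enc)
{
	return (enc & AMDGPU_METRICS_ATTR_ID_MASK) >> AMDGPU_METRICS_ATTR_ID_SHIFT;
}

static inline u32 metrics_attr_inst(u64 enc)
{
	return enc & AMDGPU_METRICS_ATTR_INST_MASK;
}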
/*
* gpu_metrics_v1_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v1_1 or later instead.
@@ -1090,6 +1226,118 @@ struct gpu_metrics_v1_7 {
uint32_t pcie_lc_perf_other_end_recovery;
};
+struct gpu_metrics_v1_8 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* VRAM max bandwidth (in GB/sec) at max memory clock */
+ uint64_t mem_max_bandwidth;
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /*PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /*PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size(KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* XGMI link status(active/inactive) */
+ uint16_t xgmi_link_status[NUM_XGMI_LINKS];
+
+ uint16_t padding;
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (Mhz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Number of current partition */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics_v1_2 xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
+
+struct gpu_metrics_attr {
+ /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */
+ uint64_t attr_encoding;
+ /* Attribute value, depends on attr_encoding */
+ void *attr_value;
+};
+
+struct gpu_metrics_v1_9 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
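A consumer of the attribute-based v1_9 layout would walk metrics_attrs and decode each attr_encoding; a minimal sketch, with an illustrative dump helper that is not part of the driver:

/* Illustrative walk over a gpu_metrics_v1_9 table */
static void dump_gpu_metrics_v1_9(const struct gpu_metrics_v1_9 *m)
{
	int i;

	for (i = 0; i < m->attr_count; i++) {
		const struct gpu_metrics_attr *a = &m->metrics_attrs[i];
		u32 id = (a->attr_encoding & AMDGPU_METRICS_ATTR_ID_MASK) >>
			 AMDGPU_METRICS_ATTR_ID_SHIFT;

		/* How attr_value is interpreted depends on the encoded type/unit */
		pr_info("attr %d: id %u value %p\n", i, id, a->attr_value);
	}
}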
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
@@ -1476,4 +1724,106 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+enum amdgpu_vr_temp {
+ AMDGPU_VDDCR_VDD0_TEMP,
+ AMDGPU_VDDCR_VDD1_TEMP,
+ AMDGPU_VDDCR_VDD2_TEMP,
+ AMDGPU_VDDCR_VDD3_TEMP,
+ AMDGPU_VDDCR_SOC_A_TEMP,
+ AMDGPU_VDDCR_SOC_C_TEMP,
+ AMDGPU_VDDCR_SOCIO_A_TEMP,
+ AMDGPU_VDDCR_SOCIO_C_TEMP,
+ AMDGPU_VDD_085_HBM_TEMP,
+ AMDGPU_VDDCR_11_HBM_B_TEMP,
+ AMDGPU_VDDCR_11_HBM_D_TEMP,
+ AMDGPU_VDD_USR_TEMP,
+ AMDGPU_VDDIO_11_E32_TEMP,
+ AMDGPU_VR_MAX_TEMP_ENTRIES,
+};
+
+enum amdgpu_system_temp {
+ AMDGPU_UBB_FPGA_TEMP,
+ AMDGPU_UBB_FRONT_TEMP,
+ AMDGPU_UBB_BACK_TEMP,
+ AMDGPU_UBB_OAM7_TEMP,
+ AMDGPU_UBB_IBC_TEMP,
+ AMDGPU_UBB_UFPGA_TEMP,
+ AMDGPU_UBB_OAM1_TEMP,
+ AMDGPU_OAM_0_1_HSC_TEMP,
+ AMDGPU_OAM_2_3_HSC_TEMP,
+ AMDGPU_OAM_4_5_HSC_TEMP,
+ AMDGPU_OAM_6_7_HSC_TEMP,
+ AMDGPU_UBB_FPGA_0V72_VR_TEMP,
+ AMDGPU_UBB_FPGA_3V3_VR_TEMP,
+ AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP,
+ AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP,
+ AMDGPU_RETIMER_0_1_0V9_VR_TEMP,
+ AMDGPU_RETIMER_4_5_0V9_VR_TEMP,
+ AMDGPU_RETIMER_2_3_0V9_VR_TEMP,
+ AMDGPU_RETIMER_6_7_0V9_VR_TEMP,
+ AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP,
+ AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP,
+ AMDGPU_IBC_HSC_TEMP,
+ AMDGPU_IBC_TEMP,
+ AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32,
+};
+
+enum amdgpu_node_temp {
+ AMDGPU_RETIMER_X_TEMP,
+ AMDGPU_OAM_X_IBC_TEMP,
+ AMDGPU_OAM_X_IBC_2_TEMP,
+ AMDGPU_OAM_X_VDD18_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_B_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_D_VR_TEMP,
+ AMDGPU_NODE_MAX_TEMP_ENTRIES = 12,
+};
+
+struct amdgpu_gpuboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES];
+ uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES];
+};
+
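The packed temperature words above carry the sensor id in bits 24:31 and the temperature in bits 0:23; a hedged decode sketch with made-up macro names:

/* Hypothetical decode of a packed temperature word (not part of the header) */
#define TEMP_SENSOR_ID(v)	(((v) >> 24) & 0xff)
#define TEMP_VALUE_C(v)		((v) & 0xffffff)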
+struct amdgpu_partition_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ /* Current clocks (Mhz) */
+ uint16_t current_gfxclk[MAX_XCC];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+ uint16_t padding;
+
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
+ uint64_t gfx_low_utilization_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
+};
+
+struct amdgpu_partition_metrics_v1_1 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index eb46cb10c24d..f9629d42ada2 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -238,7 +238,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t is_strix_tmz_wa_enabled :1;
- uint32_t reserved : 13;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 12;
};
uint32_t uint32_t_all;
};
@@ -266,7 +267,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
};
uint64_t mes_info_ctx_mc_addr;
uint32_t mes_info_ctx_size;
- uint32_t mes_kiq_unmap_timeout; // unit is 100ms
+ uint64_t reserved1;
+ uint64_t cleaner_shader_fence_mc_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -343,7 +345,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index c9b2ca5cf75f..2f12cba4eb66 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_SET_SE_MODE = 17,
MES_SCH_API_SET_GANG_SUBMIT = 18,
MES_SCH_API_SET_HW_RSRC_1 = 19,
+ MES_SCH_API_INV_TLBS = 20,
MES_SCH_API_MAX = 0xFF
};
@@ -105,6 +106,43 @@ struct MES_API_STATUS {
uint64_t api_completion_fence_value;
};
+/*
+ * MES sets api_completion_fence_value in api_completion_fence_addr
+ * when it successfully processes the API. MES also triggers the
+ * following interrupt when it finishes processing the API, whether the
+ * processing succeeded or failed:
+ * interrupt source id 181 (EOP) with context ID (DW 6 in the int
+ * cookie) set to 0xb1 and context type set to 8. The driver needs
+ * to enable TIME_STAMP_INT_ENABLE in CPC_INT_CNTL for the MES pipe to
+ * catch this interrupt.
+ * The driver also needs to set enable_mes_fence_int = 1 in the
+ * set_HW_resource package to enable this fence interrupt.
+ * When the API processing fails, the fence value is written with the
+ * lower 32 bits set to 0 and the
+ * higher 32 bits set as follows (bit positions within the high 32 bits):
+ * bit 0 - 7 API specific error code.
+ * bit 8 - 15 API OPCODE.
+ * bit 16 - 23 MISC OPCODE if any
+ * bit 24 - 30 ERROR category (API_ERROR_XXX)
+ * bit 31 Set to 1 to indicate error status
+ *
+ */
+enum { MES_SCH_ERROR_CODE_HEADER_SHIFT_12 = 8 };
+enum { MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 = 16 };
+enum { MES_ERROR_CATEGORY_SHIFT_12 = 24 };
+enum { MES_API_STATUS_ERROR_SHIFT_12 = 31 };
+
+enum MES_ERROR_CATEGORY_CODE_12 {
+ MES_ERROR_API = 1,
+ MES_ERROR_SCHEDULING = 2,
+ MES_ERROR_UNKNOWN = 3,
+};
+
+#define MES_ERR_CODE(api_err, opcode, misc_op, category) \
+ ((uint64) (api_err | opcode << MES_SCH_ERROR_CODE_HEADER_SHIFT_12 | \
+ misc_op << MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 | \
+ category << MES_ERROR_CATEGORY_SHIFT_12 | \
+ 1 << MES_API_STATUS_ERROR_SHIFT_12) << 32)
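As a worked example of the layout above, composing and checking an error fence value could look like this (illustrative only; the values are made up and uint64 is assumed to resolve to a 64-bit type in this header):

/* Illustrative use of MES_ERR_CODE: scheduling error for the INV_TLBS opcode */
uint64_t err = MES_ERR_CODE(0x3, MES_SCH_API_INV_TLBS, 0, MES_ERROR_SCHEDULING);

/* The error flag lives in bit 31 of the high 32 bits */
bool failed = (err >> 32) & (1u << MES_API_STATUS_ERROR_SHIFT_12);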
enum { MAX_COMPUTE_PIPES = 8 };
enum { MAX_GFX_PIPES = 2 };
@@ -248,7 +286,9 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
- uint32_t reserved : 11;
+ uint32_t enable_mes_fence_int: 1;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 9;
};
uint32_t uint32_all;
};
@@ -278,6 +318,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
uint32_t mes_debug_ctx_size;
/* unit is 100ms */
uint32_t mes_kiq_unmap_timeout;
+ uint64_t reserved1;
+ uint64_t cleaner_shader_fence_mc_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -357,7 +399,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
@@ -830,6 +873,35 @@ union MESAPI__SET_GANG_SUBMIT {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+/*
+ * @inv_sel 0 - select pasid as input for the invalidation, 1 - select vmid
+ * @flush_type 0 - old style, 1 - lightweight, 2 - heavyweight, 3 - heavyweight2
+ * @inv_sel_id the specific pasid when inv_sel is 0, or the specific vmid when inv_sel is 1
+ * @hub_id 0-gc_hub, 1-mm_hub
+ */
+struct INV_TLBS {
+ uint8_t inv_sel;
+ uint8_t flush_type;
+ uint16_t inv_sel_id;
+ uint32_t hub_id;
+ /* If the following two inv_range settings are both 0, the whole VM will be invalidated,
+ * otherwise only the requested range is invalidated
+ */
+ uint64_t inv_range_va_start;
+ uint64_t inv_range_size;
+ uint64_t reserved;
+};
+
+union MESAPI__INV_TLBS {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ struct INV_TLBS invalidate_tlbs;
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
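A driver-side sketch of filling the new TLB-invalidation packet for a heavyweight flush of a whole VM selected by vmid (field values are illustrative, not taken from the patch):

/* Illustrative fill of an INV_TLBS request */
union MESAPI__INV_TLBS pkt;

memset(&pkt, 0, sizeof(pkt));
pkt.invalidate_tlbs.inv_sel = 1;	/* select by vmid */
pkt.invalidate_tlbs.flush_type = 2;	/* heavyweight */
pkt.invalidate_tlbs.inv_sel_id = 8;	/* vmid 8 */
pkt.invalidate_tlbs.hub_id = 0;		/* gc_hub */
/* inv_range_va_start/inv_range_size left 0 => the whole VM is invalidated */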
#pragma pack(pop)
#endif
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index f8008270f813..3728389fc3be 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -535,8 +535,8 @@ struct v11_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v11_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v11_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h
index 5eabab611b02..03a35f8a65b0 100644
--- a/drivers/gpu/drm/amd/include/v12_structs.h
+++ b/drivers/gpu/drm/amd/include/v12_structs.h
@@ -535,8 +535,8 @@ struct v12_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v12_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v12_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6a9e26905edf..79b174e5326d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -78,7 +78,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
- bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
+ bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
case AMD_IP_BLOCK_TYPE_VPE:
+ case AMD_IP_BLOCK_TYPE_ISP:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -194,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (pp_funcs && pp_funcs->notify_rlc_state) {
- mutex_lock(&adev->pm.mutex);
-
- ret = pp_funcs->notify_rlc_state(
- adev->powerplay.pp_handle,
- en);
-
- mutex_unlock(&adev->pm.mutex);
- }
-
- return ret;
-}
-
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -329,6 +312,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_link_reset = false;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_link_reset = smu_link_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_link_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
@@ -349,6 +360,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (pp_funcs && pp_funcs->pause_power_profile) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->pause_power_profile(
+ adev->powerplay.pp_handle, pause);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
@@ -719,6 +749,29 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
return ret;
}
+/**
+ * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @adev: amdgpu_device pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * It returns false if the hardware does not support software SMU or
+ * if the feature is not supported.
+ */
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -734,6 +787,36 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -763,22 +846,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
uint32_t max)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret = 0;
-
- if (type != PP_SCLK)
- return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- mutex_lock(&adev->pm.mutex);
- ret = smu_set_soft_freq_range(smu,
- SMU_SCLK,
+ guard(mutex)(&adev->pm.mutex);
+
+ return smu_set_soft_freq_range(smu,
+ type,
min,
max);
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
@@ -1110,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->get_pp_table)
- return 0;
+ if (!table)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
@@ -1521,6 +1601,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit_type,
uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1531,7 +1612,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
- limit);
+ limit_type, limit);
mutex_unlock(&adev->pm.mutex);
return ret;
@@ -1608,6 +1689,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
+{
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu->od_enabled;
+ } else {
+ struct pp_hwmgr *hwmgr;
+
+ /*
+ * dpm on some legacy asics don't carry od_enabled member
+ * as its pp_handle is casted directly from adev.
+ */
+ if (amdgpu_dpm_is_legacy_dpm(adev))
+ return false;
+
+ hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+ return hwmgr->od_enabled;
+ }
+}
+
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size)
@@ -1615,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->set_pp_table)
+ if (!buf || !size)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
@@ -1930,3 +2036,102 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
return ret;
}
+
+/**
+ * amdgpu_dpm_get_temp_metrics - Retrieve temperature metrics of a specific
+ * type
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific temperature type. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (!pp_funcs->get_temp_metrics ||
+ !amdgpu_dpm_is_temp_metrics_supported(adev, type))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
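Per the kernel-doc above, callers can size the buffer first by passing a NULL table; a hedged usage sketch (error handling trimmed):

/* Illustrative two-step query of baseboard temperature metrics */
ssize_t size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);

if (size > 0) {
	void *table = kzalloc(size, GFP_KERNEL);

	if (table) {
		amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, table);
		/* ... consume the metrics table ... */
		kfree(table);
	}
}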
+/**
+ * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
+ * metrics type is supported
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ *
+ * This function reports whether the specified temperature metrics type is supported.
+ *
+ * Return: True if the metrics type is supported, false otherwise.
+ */
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ bool support_temp_metrics = false;
+
+ if (!pp_funcs->temp_metrics_is_supported)
+ return support_temp_metrics;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_temp_metrics =
+ pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_temp_metrics;
+}
+
+/**
+ * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
+ * partition
+ * @adev: Pointer to the device.
+ * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific XCP, including details such as
+ * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_xcp_metrics)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ return smu_get_ras_smu_driver(pp_handle);
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 42efe838fa85..b5e9c3ecf703 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -27,69 +27,69 @@
#include "amdgpu_smu.h"
#include "amdgpu_dpm_internal.h"
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
+ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ struct single_display_configuration *display_cfg;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_connector *conn;
+ int num_crtcs = 0;
+ int vrefresh;
+ u32 vblank_in_pixels, vblank_time_us;
+
+ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ /* The array should only contain active displays. */
+ if (!amdgpu_crtc->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(amdgpu_crtc->connector);
+ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
+
+ if (amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vblank_in_pixels =
amdgpu_crtc->hw_mode.crtc_htotal *
(amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2));
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
+ vblank_time_us =
+ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- return vblank_time_us;
-}
+ /* The legacy (non-DC) code has issues with mclk switching
+ * with refresh rates over 120 Hz. Disable mclk switching.
+ */
+ if (vrefresh > 120)
+ vblank_time_us = 0;
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
+ /* Find minimum vblank time. */
+ if (vblank_time_us < cfg->min_vblank_time)
+ cfg->min_vblank_time = vblank_time_us;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
+ /* Find vertical refresh rate of first active display. */
+ if (!cfg->vrefresh)
+ cfg->vrefresh = vrefresh;
}
+
+ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
+ /* Find first active CRTC and its line time. */
+ cfg->crtc_index = amdgpu_crtc->crtc_id;
+ cfg->line_time_in_us = amdgpu_crtc->line_time;
+ }
+
+ display_cfg->controller_id = amdgpu_crtc->crtc_id;
+ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
}
}
- return vrefresh;
+ cfg->display_clk = adev->clock.default_dispclk;
+ cfg->num_display = num_crtcs;
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..65296a819e6a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -98,6 +98,86 @@ const char * const amdgpu_pp_profile_name[] = {
};
/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Check runpm status for suspend state checks.
+ *
+ * Checks the state of the @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+ bool runpm_check = runpm ? adev->in_runpm : false;
+ bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
+
+ if (amdgpu_in_reset(adev) || !full_init)
+ return -EBUSY;
+
+ if (adev->in_suspend && !runpm_check)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to determine
+ * if device is active. Allow access only if device is active.Return 0 if the
+ * device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* Ignore runpm status. If device is in suspended state, deny access */
+ ret = amdgpu_pm_dev_state_check(adev, false);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow access only if the device is active. Also allow access if
+ * runtime PM is disabled, as in kernels without CONFIG_PM.
+ */
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (!ret)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access* calls
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
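The expected pairing of the new access helpers, sketched for a sysfs show callback (some_dpm_query is a hypothetical placeholder for the actual query):

/* Illustrative get/put pairing in a sysfs show callback */
ssize_t size;
int ret;

ret = amdgpu_pm_get_access_if_active(adev);
if (ret)
	return ret;

size = some_dpm_query(adev, buf);	/* hypothetical query helper */

amdgpu_pm_put_access(adev);

return size;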
+/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
@@ -140,18 +220,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -168,11 +243,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -182,14 +252,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -263,18 +332,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
@@ -299,11 +363,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -328,14 +387,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
@@ -343,8 +401,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -359,19 +416,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
@@ -394,20 +446,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -430,11 +477,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -453,11 +495,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
@@ -469,7 +506,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -490,14 +527,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
@@ -521,18 +557,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
@@ -554,19 +585,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -735,11 +760,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
@@ -785,7 +805,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -806,14 +826,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
@@ -835,14 +854,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -861,7 +875,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -892,23 +906,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -925,20 +933,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -991,14 +994,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1007,7 +1005,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1057,23 +1055,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -1240,18 +1232,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1266,24 +1253,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1297,18 +1278,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1323,24 +1299,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1378,20 +1348,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1414,11 +1379,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1439,20 +1399,21 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (ret)
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
@@ -1460,25 +1421,20 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
-static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
- enum amd_pp_sensors sensor,
- void *query)
+static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -1500,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
@@ -1524,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
@@ -1548,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
if (r)
return r;
@@ -1576,24 +1532,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
@@ -1616,11 +1567,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
@@ -1663,7 +1609,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1674,18 +1619,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -1715,9 +1654,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1725,7 +1664,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1749,20 +1688,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1786,18 +1723,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1824,14 +1756,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1843,7 +1770,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1856,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
uint32_t ss_power;
int r = 0, i;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
@@ -1869,7 +1796,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
if (r)
@@ -1939,19 +1866,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(ddev->dev);
- if (r < 0)
- return r;
-
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
+ return r;
+
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
@@ -1963,15 +1885,15 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
/* TODO: update bias level with SMU message */
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -1982,13 +1904,13 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
uint32_t ss_power;
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2006,10 +1928,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_sriov_multi_vf_mode(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
@@ -2044,7 +1967,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
* setting should not be allowed from VF if not in one VF mode.
*/
if (gc_ver >= IP_VERSION(10, 0, 0) ||
- (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+ (amdgpu_sriov_multi_vf_mode(adev))) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2087,7 +2010,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2109,7 +2033,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2120,7 +2045,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
if (gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2148,6 +2074,265 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
return 0;
}
+/**
+ * DOC: board
+ *
+ * Certain SOCs support reporting of various board attributes. This is useful
+ * for user applications to monitor various board related attributes.
+ *
+ * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
+ * seven types of attributes are reported. Baseboard temperature and
+ * gpu board temperature are reported as binary files. NPM status, current node power limit,
+ * max node power limit, node power and global ppt residency are reported as ASCII text files.
+ *
+ * * .. code-block:: console
+ *
+ * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/npm_status
+ *
+ * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/node_power
+ *
+ * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
+ */
+
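
As a concrete illustration of consuming the ASCII attributes listed above, the
minimal userspace sketch below reads node_power and cur_node_power_limit. The PCI
device path is a placeholder, and the values are printed exactly as the driver
reports them (no unit conversion is assumed).

#include <stdio.h>

/* Hypothetical device path -- substitute the real PCI address. */
#define BOARD_DIR "/sys/bus/pci/devices/0000:03:00.0/board/"

static int read_attr(const char *name, char *out, int len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), BOARD_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(out, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	char power[64], limit[64];

	if (!read_attr("node_power", power, sizeof(power)))
		printf("node_power: %s", power);
	if (!read_attr("cur_node_power_limit", limit, sizeof(limit)))
		printf("cur_node_power_limit: %s", limit);
	return 0;
}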
+/**
+ * DOC: baseboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current baseboard
+ * temperature metrics data. The file baseboard_temp is used for this.
+ * Reading the file will dump all the current baseboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
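
Unlike the ASCII attributes, baseboard_temp (and gpuboard_temp below) returns a raw
binary metrics table, so it has to be read as bytes rather than parsed as text. A
minimal userspace sketch, assuming a placeholder PCI path and leaving the table
layout to the SMU metrics definitions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/board/baseboard_temp";
	unsigned char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open baseboard_temp");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0) {
		perror("read baseboard_temp");
		return 1;
	}
	/* The payload is a versioned metrics table; only report its size here. */
	printf("read %zd bytes of baseboard temperature metrics\n", n);
	return 0;
}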
+/**
+ * DOC: gpuboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpuboard
+ * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: cur_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power limit.
+ * The file cur_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 nplimit;
+ int r;
+
+ /* get the current node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ (void *)&nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", nplimit);
+}
+
+/**
+ * DOC: node_power
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power.
+ * The file node_power is used for this.
+ */
+static ssize_t amdgpu_show_node_power(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", npower);
+}
+
+/**
+ * DOC: npm_status
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power management status.
+ * The file npm_status is used for this. It shows the status as enabled or disabled based on
+ * the current node power value. If node power is zero, the status is disabled; otherwise it is enabled.
+ */
+static ssize_t amdgpu_show_npm_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+}
+
+/**
+ * DOC: global_ppt_resid
+ *
+ * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
+ * The file global_ppt_resid is used for this.
+ */
+static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 gpptresid;
+ int r;
+
+ /* get the global ppt residency */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ (void *)&gpptresid);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", gpptresid);
+}
+
+/**
+ * DOC: max_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
+ * The file max_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 max_nplimit;
+ int r;
+
+ /* get the max node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&max_nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", max_nplimit);
+}
+
+static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
+static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
+static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
+static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
+static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
+static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
+static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
+
+static struct attribute *board_attrs[] = {
+ &dev_attr_baseboard_temp.attr,
+ &dev_attr_gpuboard_temp.attr,
+ NULL
+};
+
+static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_baseboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
+ return 0;
+ }
+
+ if (attr == &dev_attr_gpuboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+ .is_visible = amdgpu_board_attr_visible,
+};
+
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
struct device_attribute dev_attr;
@@ -2218,11 +2403,6 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}
@@ -2239,11 +2419,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
char *tmp, *param;
long val;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
count = min(count, sizeof(tmp_buf));
memcpy(tmp_buf, buf, count);
tmp_buf[count - 1] = '\0';
@@ -2269,14 +2444,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -2332,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
@@ -2395,13 +2569,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 3) ||
- gc_ver == IP_VERSION(10, 3, 6) ||
- gc_ver == IP_VERSION(10, 3, 7) ||
- gc_ver == IP_VERSION(11, 0, 1) ||
- gc_ver == IP_VERSION(11, 0, 4) ||
- gc_ver == IP_VERSION(11, 5, 0)))
+ if (!(gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 5, 1) ||
+ gc_ver == IP_VERSION(11, 5, 2) ||
+ gc_ver == IP_VERSION(11, 5, 3) ||
+ gc_ver == IP_VERSION(12, 0, 0) ||
+ gc_ver == IP_VERSION(12, 0, 1)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
@@ -2416,11 +2599,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2452,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_table)) {
+ int ret;
+ char *tmp = NULL;
+
+ ret = amdgpu_dpm_get_pp_table(adev, &tmp);
+ if (ret == -EOPNOTSUPP || !tmp)
+ *states = ATTR_STATE_UNSUPPORTED;
+ else
+ *states = ATTR_STATE_SUPPORTED;
}
switch (gc_ver) {
@@ -2581,18 +2776,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp);
break;
default:
r = -EINVAL;
@@ -2699,18 +2894,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2728,11 +2918,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2746,14 +2931,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2784,16 +2968,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2810,8 +2989,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2827,18 +3005,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2854,18 +3027,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2881,8 +3049,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 min_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm);
if (r)
return r;
@@ -2898,8 +3066,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 max_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm);
if (r)
return r;
@@ -2915,18 +3083,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2943,16 +3106,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2968,8 +3126,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2985,18 +3142,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -3014,11 +3166,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -3030,14 +3177,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
@@ -3054,14 +3200,31 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx);
if (r)
return r;
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3069,6 +3232,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
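
The new in2_input/in2_label pair exposes the board voltage through the standard
hwmon interface on the few boards that report it. A small userspace sketch, assuming
hwmon0 as a placeholder index and the usual hwmon convention of millivolts for
voltage inputs:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/in2_input", "r");
	int mv;

	if (!f) {
		perror("open in2_input");
		return 1;
	}
	if (fscanf(f, "%d", &mv) == 1)
		printf("board voltage: %d (millivolts by hwmon convention)\n", mv);
	fclose(f);
	return 0;
}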
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3082,8 +3251,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
return -EINVAL;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb);
if (r)
return r;
@@ -3105,7 +3274,7 @@ static int amdgpu_hwmon_get_power(struct device *dev,
u32 query = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
@@ -3152,14 +3321,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3169,7 +3333,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3217,7 +3381,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
"fastPPT" : "slowPPT");
else
- return sysfs_emit(buf, "PPT\n");
+ return sysfs_emit(buf, "%s\n",
+ to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+ "PPT1" : "PPT");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3230,29 +3396,19 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
value = value / 1000000; /* convert to Watt */
- value |= limit_type << 24;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
- err = amdgpu_dpm_set_power_limit(adev, value);
+ err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -3269,8 +3425,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk);
if (r)
return r;
@@ -3293,8 +3449,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk);
if (r)
return r;
@@ -3423,6 +3579,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3430,7 +3588,6 @@ static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_m
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
@@ -3470,6 +3627,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3477,7 +3636,6 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap.dev_attr.attr,
&sensor_dev_attr_power1_cap_default.dev_attr.attr,
&sensor_dev_attr_power1_label.dev_attr.attr,
- &sensor_dev_attr_power2_average.dev_attr.attr,
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
&sensor_dev_attr_power2_cap.dev_attr.attr,
@@ -3530,7 +3688,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3564,14 +3723,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
- if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
- return 0;
+ if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
+ if (adev->family == AMDGPU_FAMILY_SI ||
+ ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
+ (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
+ return 0;
+ }
+
+ if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
+ amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
+ effective_mode |= S_IWUSR;
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
@@ -3581,10 +3746,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* not all products support both average and instantaneous */
if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* hide max/min values if we can't both query and manage the fan */
@@ -3605,7 +3772,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
@@ -3613,11 +3781,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only APUs other than gc 9,4,3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) ||
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* only few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9,4,3*/
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
@@ -3636,7 +3812,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* hotspot temperature for gc 9,4,3*/
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3657,13 +3834,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* only Vangogh has fast PPT limit and power labels */
- if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
- (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
+ (amdgpu_dpm_get_power_limit(adev, &tmp,
+ PP_PWR_LIMIT_MAX,
+ PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
return 0;
return effective_mode;
@@ -3686,20 +3864,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3748,6 +3921,9 @@ static int parse_input_od_command_lines(const char *buf,
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
+
while (isspace(*tmp_str))
tmp_str++;
}
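
The added NULL check guards the token scan: once the last parameter has been
consumed, the remainder pointer is left NULL (the surrounding loop, not visible in
this hunk, tokenizes with strsep()), so the following isspace() dereference would
fault. A standalone demonstration of that strsep() behavior, under the assumption
that the driver loop works this way:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "s 0 500";	/* example overdrive command line */
	char *tmp = buf, *tok;

	while ((tok = strsep(&tmp, " ")) != NULL) {
		/* after the final token, tmp is NULL -- this is what the fix bails out on */
		printf("token: '%s', remaining: %s\n", tok, tmp ? tmp : "(NULL)");
	}
	return 0;
}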
@@ -3767,11 +3943,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
@@ -3780,7 +3951,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -3799,14 +3970,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return ret;
}
@@ -4505,6 +4674,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
+ uint32_t tmp;
int ret;
if (adev->pm.sysfs_initialized)
@@ -4563,7 +4733,29 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = devm_device_add_group(adev->dev,
&amdgpu_pm_policy_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
+ }
+
+ if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_board_attr_group);
+ if (ret)
+ goto err_out1;
+ if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&tmp) != -EOPNOTSUPP) {
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_cur_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_max_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
+ amdgpu_board_attr_group.name);
+ }
}
adev->pm.sysfs_initialized = true;
@@ -4776,16 +4968,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(dev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
@@ -4802,7 +4988,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -4822,10 +5008,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 1f5ac7e0230d..aa3f427819a0 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -263,10 +263,6 @@ struct amdgpu_dpm {
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
struct amdgpu_dpm_dynamic_state dyn_state;
struct amdgpu_dpm_fan fan;
u32 tdp_limit;
@@ -295,7 +291,8 @@ enum ip_power_state {
};
/* Used to mask smu debug modes */
-#define SMU_DEBUG_HALT_ON_ERROR 0x1
+#define SMU_DEBUG_HALT_ON_ERROR BIT(0)
+#define SMU_DEBUG_POOL_USE_VRAM BIT(1)
#define MAX_SMU_I2C_BUSES 2
@@ -409,22 +406,24 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en);
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause);
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
@@ -519,6 +518,10 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table);
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -548,7 +551,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
- uint32_t limit);
+ uint32_t limit_type, uint32_t limit);
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m);
@@ -556,6 +559,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
void **addr,
size_t *size);
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev);
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size);
@@ -603,5 +607,11 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type);
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
index 5c2a89f0d5d5..cc6d7ba040e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -23,10 +23,6 @@
#ifndef __AMDGPU_DPM_INTERNAL_H__
#define __AMDGPU_DPM_INTERNAL_H__
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 67a8e22b1126..33eb85dd68e9 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, enable);
if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
}
}
@@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = kv_process_firmware_header(adev);
if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
+ drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");
return ret;
}
kv_init_fps_limits(adev);
kv_init_graphics_levels(adev);
ret = kv_program_bootup_state(adev);
if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");
return ret;
}
kv_calculate_dfs_bypass_settings(adev);
ret = kv_upload_dpm_settings(adev);
if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
+ drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");
return ret;
}
ret = kv_populate_uvd_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");
return ret;
}
ret = kv_populate_vce_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");
return ret;
}
ret = kv_populate_samu_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");
return ret;
}
ret = kv_populate_acp_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");
return ret;
}
kv_program_vc(adev);
@@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
if (pi->enable_auto_thermal_throttling) {
ret = kv_enable_auto_thermal_throttling(adev);
if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");
return ret;
}
}
ret = kv_enable_dpm_voltage_scaling(adev);
if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");
return ret;
}
ret = kv_set_dpm_interval(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");
return ret;
}
ret = kv_set_dpm_boot_state(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");
return ret;
}
ret = kv_enable_ulv(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");
return ret;
}
kv_start_dpm(adev);
ret = kv_enable_didt(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");
return ret;
}
ret = kv_enable_smc_cac(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");
return ret;
}
@@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = amdgpu_kv_smc_bapm_enable(adev, false);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
@@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
@@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
err = amdgpu_kv_smc_bapm_enable(adev, false);
if (err)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
@@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
}
@@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle)
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_sclk_t(adev);
@@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_set_enabled_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_acp_boot_level(adev);
@@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
@@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
@@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
@@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
data_offset);
if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
@@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
else
pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
}
if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
@@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(adev,
@@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps)
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
+ rps->vclk, rps->dclk);
for (i = 0; i < ps->num_levels; i++) {
struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+ drm_dbg(adev_to_drm(adev),
+ "power level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static void kv_dpm_fini(struct amdgpu_device *adev)
@@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- DRM_INFO("amdgpu: dpm initialized\n");
+ drm_info(adev_to_drm(adev), "dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
+ drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
return ret;
}
@@ -3042,6 +3044,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3052,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3066,35 +3071,45 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
-static bool kv_dpm_is_idle(void *handle)
+static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index e861355ebd75..c7ed0b457129 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -47,7 +47,7 @@
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
const char *s;
@@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
s = "performance";
break;
}
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
+ drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
(class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
+ else
+ drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
}
-void amdgpu_dpm_print_cap_info(u32 caps)
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
+ (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
+ (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
+ (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
}
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
+ rps == adev->pm.dpm.current_ps ? " c" : "",
+ rps == adev->pm.dpm.requested_ps ? " r" : "",
+ rps == adev->pm.dpm.boot_ps ? " b" : "");
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -797,8 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
int i;
struct amdgpu_ps *ps;
u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
@@ -943,9 +916,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
return -EINVAL;
if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
- printk("switching from power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching from power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching to power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
@@ -971,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
amdgpu_dpm_post_set_power_state(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
if (pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -994,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_get_active_displays(adev);
+ if (!adev->dc_enabled)
+ amdgpu_dpm_get_display_cfg(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
@@ -1009,9 +980,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1007,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
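
The thermal worker now takes adev->pm.mutex and re-checks dpm_enabled under the lock before touching the power state, so a concurrent suspend that clears the flag cannot race with the body of the handler. A standalone sketch of that lock-then-check shape, using a pthread mutex as a stand-in for the kernel mutex; the names are illustrative, not the driver's:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool dpm_enabled;

static void thermal_work(void)
{
	pthread_mutex_lock(&pm_mutex);
	if (!dpm_enabled) {
		/* flag is only trusted while the mutex is held */
		pthread_mutex_unlock(&pm_mutex);
		return;
	}
	/* ... read temperature, pick a thermal state, recompute clocks ... */
	pthread_mutex_unlock(&pm_mutex);
}

int main(void)
{
	dpm_enabled = true;
	thermal_work();
	return 0;
}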
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
index 93bd3973330c..7120eef30509 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -23,10 +23,9 @@
#ifndef __LEGACY_DPM_H__
#define __LEGACY_DPM_H__
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2);
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps);
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps);
int amdgpu_get_platform_caps(struct amdgpu_device *adev);
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a87dcf0974bc..1f539cc65f41 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -30,16 +30,32 @@
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
-#include "sid.h"
+#include "atom.h"
+#include "gfx_v6_0.h"
#include "r600_dpm.h"
+#include "sid.h"
#include "si_dpm.h"
-#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
if (xclk == 0)
return 0;
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
@@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
} else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
+ SQ_POWER_THROTTLE__MIN_POWER_MASK;
+ sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
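
The hunk above replaces the old MAX_POWER()/MIN_POWER() wrapper macros with explicit REG__FIELD_MASK / REG__FIELD__SHIFT arithmetic. A small standalone sketch of that packing style, with invented mask and shift values that do not correspond to any real SI register:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_REG__MIN_POWER_MASK   0x00003FFF
#define EXAMPLE_REG__MIN_POWER__SHIFT 0x0
#define EXAMPLE_REG__MAX_POWER_MASK   0x3FFF0000
#define EXAMPLE_REG__MAX_POWER__SHIFT 0x10

int main(void)
{
	uint32_t reg = 0;

	/* clear the field, then place the new value with the shift/mask pair */
	reg &= ~EXAMPLE_REG__MAX_POWER_MASK;
	reg |= (0x123u << EXAMPLE_REG__MAX_POWER__SHIFT) & EXAMPLE_REG__MAX_POWER_MASK;
	printf("reg = 0x%08x\n", reg);	/* 0x01230000 */
	return 0;
}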
@@ -2539,18 +2558,13 @@ static int si_enable_power_containment(struct amdgpu_device *adev,
if (enable) {
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
- if (smc_result != PPSMC_Result_OK) {
+ if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
- } else {
- ni_pi->pc_enabled = true;
- }
}
} else {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
}
}
@@ -2761,9 +2775,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
if (!cac_tables)
return -ENOMEM;
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
+ reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
+ reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
+ WREG32(mmCG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
@@ -2962,10 +2976,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
@@ -3062,11 +3076,17 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
+ /* Consider zero vblank time too short and disable MCLK switching.
+ * Note that the vblank time is set to maximum when no displays are attached,
+ * so we'll still enable MCLK switching in that case.
+ */
+ if (vblank_time == 0)
+ return true;
+ else if (vblank_time < switch_limit)
return true;
else
return false;
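
With min_vblank_time now coming from pm_display_cfg, the rule is: a zero value (which appears to mean the vblank period was not populated) blocks MCLK switching outright, otherwise the GDDR5-only 450 us limit applies. A runnable restatement with illustrative inputs:

#include <stdbool.h>
#include <stdio.h>

static bool vblank_too_short(unsigned int vblank_time_us, bool gddr5)
{
	unsigned int switch_limit = gddr5 ? 450 : 0;

	if (vblank_time_us == 0)	/* unknown/zero: play it safe */
		return true;
	return vblank_time_us < switch_limit;
}

int main(void)
{
	printf("%d %d %d\n",
	       vblank_too_short(0, true),	/* 1: treated as too short */
	       vblank_too_short(300, true),	/* 1: below the 450 us limit */
	       vblank_too_short(600, true));	/* 0: long enough to switch MCLK */
	return 0;
}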
@@ -3422,6 +3442,8 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
+ const struct amd_pp_display_configuration *display_cfg =
+ &adev->pm.pm_display_cfg;
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching = false;
@@ -3430,6 +3452,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
+ u32 high_pixelclock_count = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
@@ -3457,6 +3480,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
+ /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find number of such displays connected.
+ */
+ for (i = 0; i < display_cfg->num_display; i++) {
+ /* The array only contains active displays. */
+ if (display_cfg->displays[i].pixel_clock > 297000)
+ high_pixelclock_count++;
+ }
+
+ /* These are some ad-hoc fixes to some issues observed with SI GPUs.
+ * They are necessary because we don't have something like dce_calcs
+ * for these GPUs to calculate bandwidth requirements.
+ */
+ if (high_pixelclock_count) {
+ /* Work around flickering lines at the bottom edge
+ * of the screen when using a single 4K 60Hz monitor.
+ */
+ disable_mclk_switching = true;
+
+ /* On Oland, we observe some flickering when two 4K 60Hz
+ * displays are connected, possibly because voltage is too low.
+ * Raise the voltage by requiring a higher SCLK.
+ * (Voltage cannot be adjusted independently without also raising SCLK.)
+ */
+ if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+ disable_sclk_switching = true;
+ }
+
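
The 297000 cutoff reads as kHz, i.e. the ~297 MHz pixel clock of 4K 30Hz (assuming displays[i].pixel_clock is in kHz here). A worked count with illustrative modes:

#include <stdio.h>

int main(void)
{
	const unsigned int pixel_clock_khz[] = { 594000 /* 4K 60Hz */,
						 333000 /* ~1080p 144Hz */,
						 148500 /* 1080p 60Hz */ };
	unsigned int i, high = 0;

	for (i = 0; i < 3; i++)
		if (pixel_clock_khz[i] > 297000)
			high++;
	printf("high pixelclock displays: %u\n", high);	/* 2 */
	return 0;
}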
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -3467,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
rps->ecclk = 0;
}
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ if ((adev->pm.pm_display_cfg.num_display > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
@@ -3615,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
+ display_cfg->display_clk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
@@ -3669,10 +3721,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
+ column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
+ bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
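
A worked example of the density formula with illustrative field values (NOOFROWS=3 gives 13 row bits, NOOFCOLS=2 gives 10 column bits, NOOFBANK=1 gives 3 bank bits, 32-bit width); the "- 20" appears to scale the raw bit count down to megabits per device:

#include <stdio.h>

int main(void)
{
	unsigned int row = 3 + 10, column = 2 + 8, bank = 1 + 2, width = 32;
	unsigned int density = (1u << (row + column - 20 + bank)) * width;

	printf("density = %u Mbit\n", density);	/* 2048 */
	return 0;
}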
@@ -3756,11 +3808,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
} else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
}
@@ -3785,20 +3837,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
static void si_start_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
}
@@ -3838,7 +3890,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
- WREG32(SMC_SCRATCH0, parameter);
+ WREG32(mmSMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
@@ -4023,12 +4075,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
@@ -4044,14 +4096,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
#if 0
@@ -4132,44 +4184,35 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
+ WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
+ ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
}
}
static void si_program_display_gap(struct amdgpu_device *adev)
{
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 tmp, pipe;
- int i;
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ if (cfg->num_display > 0)
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
- if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ if (cfg->num_display > 1)
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
+ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
+ pipe = cfg->crtc_index;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
@@ -4180,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev)
* This can be a problem on PowerXpress systems or if you want to use the card
* for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+ si_notify_smc_display_change(adev, cfg->num_display > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
@@ -4189,10 +4232,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
if (enable) {
if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
} else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
}
@@ -4214,15 +4257,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+ pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
+ pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
- WREG32(CG_BSP, pi->dsp);
+ WREG32(mmCG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+ WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
@@ -4231,54 +4274,54 @@ static void si_program_tp(struct amdgpu_device *adev)
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+ WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
- WREG32(CG_TPC, R600_TPC_DFLT);
+ WREG32(mmCG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+ WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT | R600_SST_DFLT << CG_SSP__SST__SHIFT));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
- WREG32(CG_FTV, pi->vrc);
+ WREG32(mmCG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
- WREG32(CG_FTV, 0);
+ WREG32(mmCG_FTV, 0);
}
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
@@ -4735,7 +4778,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;
if (tmp >= 4)
dram_rows = 16384;
@@ -4909,7 +4952,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
- reg = CG_R(0xffff) | CG_L(0);
+ reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
@@ -4935,10 +4978,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
+ SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5057,8 +5103,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
@@ -5102,10 +5148,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5250,8 +5296,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
@@ -5294,16 +5340,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+ spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
+ spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
+ spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5314,12 +5360,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
+ cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
+ cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
}
}
@@ -5485,8 +5531,8 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
+ (adev->pm.pm_display_cfg.num_display <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5579,7 +5625,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
return -EINVAL;
if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
@@ -5600,13 +5646,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
+ a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
@@ -5615,14 +5661,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
static int si_disable_ulv(struct amdgpu_device *adev)
{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
+ PPSMC_Result r;
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
+ r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+ return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
@@ -5639,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
+ if (adev->pm.pm_display_cfg.display_clk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
@@ -5793,39 +5835,36 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
static int si_upload_smc_data(struct amdgpu_device *adev)
{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
+ /* When a display is plugged in, program these so that the SMC
+ * performs MCLK switching when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+ if (cfg->num_display) {
+ crtc_index = cfg->crtc_index;
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
+ if (cfg->line_time_in_us) {
+ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
+ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
}
}
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ crtc_index);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ mclk_change_block_cp_min);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ mclk_change_block_cp_max);
return 0;
}
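
The 200 and 100 above read as microsecond windows converted into display-line counts via line_time_in_us (assuming that field is the scanout time of one line in microseconds, as the name suggests). A worked example with an illustrative 20 us line time:

#include <stdio.h>

int main(void)
{
	unsigned int line_time_in_us = 20;
	unsigned int cp_min = 200 / line_time_in_us;	/* 10 lines */
	unsigned int cp_max = 100 / line_time_in_us;	/*  5 lines */

	printf("cp_min=%u cp_max=%u\n", cp_min, cp_max);
	return 0;
}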
@@ -6180,9 +6219,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
@@ -6204,8 +6243,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
return (u16)speed_cntl;
}
@@ -6412,21 +6451,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
+ u32 thermal_int = RREG32(mmCG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
+ WREG32(mmCG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32(mmCG_THERMAL_INT, thermal_int);
}
return 0;
@@ -6447,9 +6486,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
return -EINVAL;
}
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
@@ -6463,20 +6502,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
@@ -6495,7 +6534,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
return 0;
}
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
@@ -6531,7 +6570,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
@@ -6590,8 +6629,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
if (adev->pm.no_fan)
return -ENOENT;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6621,7 +6660,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
if (speed > 255)
return -EINVAL;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6630,9 +6669,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
do_div(tmp64, 255);
duty = (u32)tmp64;
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32(mmCG_FDO_CTRL0, tmp);
return 0;
}
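
The duty computation rescales the 0-255 speed request into the controller's 0..duty100 range using a 64-bit intermediate. A standalone worked example; duty100 = 100 is only illustrative (the real value is read back from CG_FDO_CTRL1):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t speed = 128, duty100 = 100;
	uint64_t tmp64 = (uint64_t)speed * duty100;	/* avoid 32-bit overflow */
	uint32_t duty = (uint32_t)(tmp64 / 255);	/* 50 */

	printf("duty=%u\n", duty);
	return 0;
}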
@@ -6672,8 +6711,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
if (si_pi->fan_is_controlled_by_smc)
return 0;
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+ tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return 0;
}
@@ -6691,7 +6730,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
if (tach_period == 0)
return -ENOENT;
@@ -6720,9 +6759,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
@@ -6736,13 +6775,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
@@ -6760,14 +6799,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution - 1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
@@ -7007,13 +7046,20 @@ static void si_set_vce_clock(struct amdgpu_device *adev,
if ((old_rps->evclk != new_rps->evclk) ||
(old_rps->ecclk != new_rps->ecclk)) {
/* Turn the clocks on when encoding, off otherwise */
+ dev_dbg(adev->dev, "set VCE clocks: %u, %u\n", new_rps->evclk, new_rps->ecclk);
+
if (new_rps->evclk || new_rps->ecclk) {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, false, false);*/
+ amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE);
} else {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, true, false);
- amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE);
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
}
}
@@ -7465,8 +7511,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi->pasi = CYPRESS_HASI_DFLT;
pi->vrc = SISLANDS_VRC_DFLT;
- pi->gfx_clock_gating = true;
-
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
@@ -7477,7 +7521,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
eg_pi->dynamic_ac_timing = true;
- eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
eg_pi->pcie_performance_request =
amdgpu_acpi_is_pcie_performance_request_supported(adev);
@@ -7530,14 +7573,15 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
} else {
pl = &ps->performance_levels[current_index];
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
@@ -7554,14 +7598,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7571,14 +7615,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7786,6 +7830,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7793,6 +7838,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7810,35 +7856,47 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
-static bool si_dpm_is_idle(void *handle)
+static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX */
return true;
@@ -7869,8 +7927,8 @@ static int si_dpm_get_temp(void *handle)
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
+ temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
if (temp & 0x200)
actual_temp = 255;
@@ -7915,15 +7973,16 @@ static void si_dpm_print_power_state(void *handle,
struct rv7xx_pl *pl;
int i;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
@@ -8000,8 +8059,8 @@ static int si_dpm_read_sensor(void *handle, int idx,
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index 11cb7874a6bb..3aed75fbf913 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
@@ -38,11 +38,7 @@
#define MC_ARB_DRAM_TIMING2_2 0xa00
#define MC_ARB_DRAM_TIMING2_3 0xa01
-#define MAX_NO_OF_MVDD_VALUES 2
-#define MAX_NO_VREG_STEPS 32
#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
#define RV770_ASI_DFLT 1000
#define CYPRESS_HASI_DFLT 400000
#define PCIE_PERF_REQ_PECI_GEN1 2
@@ -51,11 +47,6 @@
#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
-#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-
-#define RV770_SMC_TABLE_ADDRESS 0xB000
-#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-
#define SMC_STROBE_RATIO 0x0F
#define SMC_STROBE_ENABLE 0x10
@@ -64,27 +55,6 @@
#define SMC_MC_RTT_ENABLE 0x04
#define SMC_MC_STUTTER_EN 0x08
-#define RV770_SMC_VOLTAGEMASK_VDDC 0
-#define RV770_SMC_VOLTAGEMASK_MVDD 1
-#define RV770_SMC_VOLTAGEMASK_VDDCI 2
-#define RV770_SMC_VOLTAGEMASK_MAX 4
-
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define NISLANDS_SMC_STROBE_RATIO 0x0F
-#define NISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define NISLANDS_SMC_MC_STUTTER_EN 0x08
-
-#define MAX_NO_VREG_STEPS 32
-
-#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-
#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
@@ -219,32 +189,6 @@ enum si_cac_config_reg_type
SISLANDS_CACCONFIG_MAX
};
-enum si_power_level {
- SI_POWER_LEVEL_LOW = 0,
- SI_POWER_LEVEL_MEDIUM = 1,
- SI_POWER_LEVEL_HIGH = 2,
- SI_POWER_LEVEL_CTXSW = 3,
-};
-
-enum si_td {
- SI_TD_AUTO,
- SI_TD_UP,
- SI_TD_DOWN,
-};
-
-enum si_display_watermark {
- SI_DISPLAY_WATERMARK_LOW = 0,
- SI_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum si_display_gap
-{
- SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- SI_PM_DISPLAY_GAP_VBLANK = 1,
- SI_PM_DISPLAY_GAP_WATERMARK = 2,
- SI_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
extern const struct amdgpu_ip_block_version si_smu_ip_block;
struct ni_leakage_coeffients
@@ -258,56 +202,6 @@ struct ni_leakage_coeffients
u32 t_ref;
};
-struct SMC_Evergreen_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-
-struct evergreen_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct evergreen_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_Evergreen_MCRegisterSet
-{
- uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-
-struct SMC_Evergreen_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
- SMC_Evergreen_MCRegisterSet data[5];
-};
-
-typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
-
-struct SMC_NIslands_MCRegisterSet
-{
- uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-
-struct ni_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
struct SMC_NIslands_MCRegisterAddress
{
uint16_t s0;
@@ -316,257 +210,20 @@ struct SMC_NIslands_MCRegisterAddress
typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-struct SMC_NIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-
-struct evergreen_ulv_param {
- bool supported;
- struct rv7xx_pl *pl;
-};
-
-struct evergreen_arb_registers {
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 mc_arb_rfsh_rate;
- u32 mc_arb_burst_time;
-};
-
-struct at {
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
-};
-
-struct ni_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct RV770_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-
-struct RV770_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-
-
-struct RV730_SMC_MCLK_VALUE
-{
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL2;
- uint32_t vMPLL_FUNC_CNTL3;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-
-struct RV770_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-
-union RV7XX_SMC_MCLK_VALUE
-{
- RV770_SMC_MCLK_VALUE mclk770;
- RV730_SMC_MCLK_VALUE mclk730;
-};
-
-typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- union{
- uint8_t seqValue;
- uint8_t ACIndex;
- };
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t gen2XSP;
- uint8_t backbias;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- RV770_SMC_SCLK_VALUE sclk;
- RV7XX_SMC_MCLK_VALUE mclk;
- RV770_SMC_VOLTAGE_VALUE vddc;
- RV770_SMC_VOLTAGE_VALUE mvdd;
- RV770_SMC_VOLTAGE_VALUE vddci;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t stateFlags;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-
-struct RV770_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t padding1;
- uint8_t padding2;
- uint8_t padding3;
- RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
-
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
-
-struct RV770_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[MAX_NO_VREG_STEPS];
- RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- RV770_SMC_SWSTATE initialState;
- RV770_SMC_SWSTATE ACPIState;
- RV770_SMC_SWSTATE driverState;
- RV770_SMC_SWSTATE ULVState;
-};
-
-typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
-
-struct vddc_table_entry {
- u16 vddc;
- u8 vddc_index;
- u8 high_smio;
- u32 low_smio;
-};
-
-struct rv770_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct rv730_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl2;
- u32 mpll_func_cntl3;
- u32 mpll_ss;
- u32 mpll_ss2;
-};
-
-union r7xx_clock_registers {
- struct rv770_clock_registers rv770;
- struct rv730_clock_registers rv730;
-};
-
struct rv7xx_power_info {
/* flags */
- bool mem_gddr5;
- bool pcie_gen2;
- bool dynamic_pcie_gen2;
- bool acpi_pcie_gen2;
- bool boot_in_gen2;
bool voltage_control; /* vddc */
bool mvdd_control;
bool sclk_ss;
bool mclk_ss;
bool dynamic_ss;
- bool gfx_clock_gating;
- bool mg_clock_gating;
- bool mgcgtssm;
- bool power_gating;
bool thermal_protection;
- bool display_gap;
- bool dcodt;
- bool ulps;
- /* registers */
- union r7xx_clock_registers clk_regs;
- u32 s0_vid_lower_smio_cntl;
/* voltage */
- u32 vddc_mask_low;
- u32 mvdd_mask_low;
u32 mvdd_split_frequency;
- u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
u16 max_vddc;
u16 max_vddc_in_table;
u16 min_vddc_in_table;
- struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
- u8 valid_vddc_entries;
- /* dc odt */
- u32 mclk_odt_threshold;
- u8 odt_value_0[2];
- u8 odt_value_1[2];
/* stored values */
- u32 boot_sclk;
u16 acpi_vddc;
u32 ref_div;
u32 active_auto_throttle_sources;
@@ -582,17 +239,6 @@ struct rv7xx_power_info {
u32 asi;
u32 pasi;
u32 vrc;
- u32 restricted_levels;
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
- /* smc offsets */
- u16 state_table_start;
- u16 soft_regs_start;
- u16 sram_end;
- /* scratch structs */
- RV770_SMC_STATETABLE smc_statetable;
};
enum si_pcie_gen {
@@ -611,44 +257,12 @@ struct rv7xx_pl {
enum si_pcie_gen pcie_gen; /* si+ only */
};
-struct rv7xx_ps {
- struct rv7xx_pl high;
- struct rv7xx_pl medium;
- struct rv7xx_pl low;
- bool dc_compatible;
-};
-
struct si_ps {
u16 performance_level_count;
bool dc_compatible;
struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
-struct ni_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ni_cac_data
-{
- struct ni_leakage_coeffients leakage_coefficients;
- u32 i_leakage;
- s32 leakage_minimum_temperature;
- u32 pwr_const;
- u32 dc_cac_value;
- u32 bif_cac_value;
- u32 lkge_pwr;
- u8 mc_wr_weight;
- u8 mc_rd_weight;
- u8 allow_ovrflw;
- u8 num_win_tdp;
- u8 l2num_win_tdp;
- u8 lts_truncate_n;
-};
-
struct evergreen_power_info {
/* must be first! */
struct rv7xx_power_info rv7xx;
@@ -657,203 +271,33 @@ struct evergreen_power_info {
bool dynamic_ac_timing;
bool abm;
bool mcls;
- bool light_sleep;
- bool memory_transition;
bool pcie_performance_request;
- bool pcie_performance_request_registered;
bool sclk_deep_sleep;
- bool dll_default_on;
- bool ls_clock_gating;
bool smu_uvd_hs;
bool uvd_enabled;
/* stored values */
u16 acpi_vddci;
- u8 mvdd_high_index;
- u8 mvdd_low_index;
u32 mclk_edc_wr_enable_threshold;
- struct evergreen_mc_reg_table mc_reg_table;
struct atom_voltage_table vddc_voltage_table;
struct atom_voltage_table vddci_voltage_table;
- struct evergreen_arb_registers bootup_arb_registers;
- struct evergreen_ulv_param ulv;
- struct at ats[2];
- /* smc offsets */
- u16 mc_reg_table_start;
struct amdgpu_ps current_rps;
- struct rv7xx_ps current_ps;
struct amdgpu_ps requested_rps;
- struct rv7xx_ps requested_ps;
-};
-
-struct PP_NIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSST;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint8_t Reserved[6];
-};
-
-typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-
-struct PP_NIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
-};
-typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-
-struct NISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-
-struct NISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- NISLANDS_SMC_SCLK_VALUE sclk;
- NISLANDS_SMC_MCLK_VALUE mclk;
- NISLANDS_SMC_VOLTAGE_VALUE vddc;
- NISLANDS_SMC_VOLTAGE_VALUE mvdd;
- NISLANDS_SMC_VOLTAGE_VALUE vddci;
- NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint32_t powergate_en;
- uint8_t hUp;
- uint8_t hDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t reserved[2];
- PP_NIslands_Dpm2PerfLevel dpm2;
-};
-
-typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct NISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[];
-};
-
-typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
-
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define NISLANDS_MAX_NO_VREG_STEPS 32
-
-struct NISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
};
-typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
-
struct ni_power_info {
/* must be first! */
struct evergreen_power_info eg;
- struct ni_clock_registers clock_registers;
- struct ni_mc_reg_table mc_reg_table;
u32 mclk_rtt_mode_threshold;
/* flags */
- bool use_power_boost_limit;
bool support_cac_long_term_average;
bool cac_enabled;
bool cac_configuration_required;
bool driver_calculate_cac_leakage;
- bool pc_enabled;
bool enable_power_containment;
bool enable_cac;
bool enable_sq_ramping;
- /* smc offsets */
- u16 arb_table_start;
- u16 fan_table_start;
- u16 cac_table_start;
- u16 spll_table_start;
- /* CAC stuff */
- struct ni_cac_data cac_data;
- u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
- const struct ni_cac_weights *cac_weights;
- u8 lta_window_size;
- u8 lts_truncate;
struct si_ps current_ps;
struct si_ps requested_ps;
- /* scratch structs */
- SMC_NIslands_MCRegisters smc_mc_reg_table;
- NISLANDS_SMC_STATETABLE smc_statetable;
};
struct si_cac_config_reg
@@ -952,7 +396,6 @@ struct si_leakage_voltage
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
-
struct si_ulv_param {
bool supported;
u32 cg_ulv_control;
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..281a5e377aee 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -30,6 +30,12 @@
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
@@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,
if ((smc_address + 3) > limit)
return -EINVAL;
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
@@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
@@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- original_data = RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
}
done:
@@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
@@ -166,20 +172,42 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
{
u32 tmp;
int i;
+ int usec_timeout;
+
+ /* SMC seems to process some messages exceptionally slowly. */
+ switch (msg) {
+ case PPSMC_MSG_NoForcedLevel:
+ case PPSMC_MSG_SetEnabledLevels:
+ case PPSMC_MSG_SetForcedLevels:
+ case PPSMC_MSG_DisableULV:
+ case PPSMC_MSG_SwitchToSwState:
+ usec_timeout = 1000000; /* 1 sec */
+ break;
+ default:
+ usec_timeout = 200000; /* 200 ms */
+ break;
+ }
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
- WREG32(SMC_MESSAGE_0, msg);
+ WREG32(mmSMC_MESSAGE_0, msg);
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
+ for (i = 0; i < usec_timeout; i++) {
+ tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(SMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
+ if (tmp == 0) {
+ drm_warn(adev_to_drm(adev),
+ "%s timeout on message: %x (SMC_SCRATCH0: %x)\n",
+ __func__, msg, RREG32(mmSMC_SCRATCH0));
+ }
+
+ return (PPSMC_Result)tmp;
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
@@ -225,18 +253,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
@@ -251,7 +279,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
+ *value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
@@ -266,7 +294,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
+ WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 686345f75f26..3aaf3dd71868 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -28,12 +27,10 @@
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
-#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
-#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -51,6 +48,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
+ if (!hwmgr->device) {
+ kfree(hwmgr);
+ return -ENOMEM;
+ }
+
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -239,7 +241,7 @@ static void pp_late_fini(struct amdgpu_ip_block *ip_block)
}
-static bool pp_is_idle(void *handle)
+static bool pp_is_idle(struct amdgpu_ip_block *ip_block)
{
return false;
}
@@ -629,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
+ if (!hwmgr || !hwmgr->pm_en || !table)
return -EINVAL;
+ if (!hwmgr->soft_pp_table)
+ return -EOPNOTSUPP;
+
*table = (char *)hwmgr->soft_pp_table;
return hwmgr->soft_pp_table_size;
}
@@ -950,7 +955,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-static int pp_set_power_limit(void *handle, uint32_t limit)
+static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t max_power_limit;
@@ -1549,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle)
struct amdgpu_device *adev = hwmgr->adev;
if (!adev->dc_enabled) {
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
-
+ amdgpu_dpm_get_display_cfg(adev);
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 90452b66e107..a59677cf8dfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -149,16 +149,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
- return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
- return 0;
-}
-
-
int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 4bd92fd782be..ce166a7f8e42 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -555,8 +563,8 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
PP_ASSERT_WITH_CODE((NULL != voltage_info),
"Could not find Voltage Table in BIOS.", return false;);
- ret = (NULL != atomctrl_lookup_voltage_type_v3
- (voltage_info, voltage_type, voltage_mode)) ? true : false;
+ ret = atomctrl_lookup_voltage_type_v3
+ (voltage_info, voltage_type, voltage_mode) != NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index 82d540334318..6120f14caab0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -158,84 +158,6 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
return result;
}
-
-static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- const void *table_address;
- uint16_t idx;
-
- idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
- table_address = smu_atom_get_data_table(hwmgr->adev,
- idx, NULL, NULL, NULL);
- PP_ASSERT_WITH_CODE(table_address,
- "Error retrieving BIOS Table Address!",
- return NULL);
-
- return (struct atom_gpio_pin_lut_v2_1 *)table_address;
-}
-
-static bool pp_atomfwctrl_lookup_gpio_pin(
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- unsigned int size = le16_to_cpu(
- gpio_lookup_table->table_header.structuresize);
- unsigned int offset =
- offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]);
- unsigned long start = (unsigned long)gpio_lookup_table;
-
- while (offset < size) {
- const struct atom_gpio_pin_assignment *pin_assignment =
- (const struct atom_gpio_pin_assignment *)(start + offset);
-
- if (pin_id == pin_assignment->gpio_id) {
- gpio_pin_assignment->uc_gpio_pin_bit_shift =
- pin_assignment->gpio_bitshift;
- gpio_pin_assignment->us_gpio_pin_aindex =
- le16_to_cpu(pin_assignment->data_a_reg_index);
- return true;
- }
- offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1;
- }
- return false;
-}
-
-/*
- * Returns TRUE if the given pin id find in lookup table.
- */
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- bool ret = false;
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table =
- pp_atomfwctrl_get_gpio_lookup_table(hwmgr);
-
- /* If we cannot find the table do NOT try to control this voltage. */
- PP_ASSERT_WITH_CODE(gpio_lookup_table,
- "Could not find GPIO lookup Table in BIOS.",
- return false);
-
- ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table,
- pin_id, gpio_pin_assignment);
-
- return ret;
-}
-
-/*
- * Enter to SelfRefresh mode.
- * @param hwmgr
- */
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr)
-{
- /* 0 - no action
- * 1 - leave power to video memory always on
- */
- return 0;
-}
-
/** pp_atomfwctrl_get_gpu_pll_dividers_vega10().
*
* @param hwmgr input parameter: pointer to HwMgr
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index e86e05c786d9..0d62903d5676 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -217,9 +217,6 @@ struct pp_atomfwctrl_smc_dpm_parameters {
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr);
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment);
int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index a8c732e07006..14ccd743ca1d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1032,7 +1032,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
- uint32_t ret = 0;
+ int ret = 0;
switch (type) {
case PP_SCLK:
@@ -1642,7 +1642,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
- .powerdown_uvd = NULL,
.powergate_uvd = smu10_powergate_vcn,
.powergate_vce = NULL,
.get_mclk = smu10_dpm_get_mclk,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
index f2bda3bcbbde..5e4c80f7b20a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
@@ -55,7 +55,7 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
index fc8f8a6acc72..e56abbadc78b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
@@ -28,7 +28,6 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 632a25957477..9b28c0728269 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
- thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
thermal_data->sw_ctf_threshold = thermal_data->max;
@@ -5754,7 +5753,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.patch_boot_state = smu7_dpm_patch_boot_state,
.get_pp_table_entry = smu7_get_pp_table_entry,
.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
- .powerdown_uvd = smu7_powerdown_uvd,
.powergate_uvd = smu7_powergate_uvd,
.powergate_vce = smu7_powergate_vce,
.disable_clock_power_gating = smu7_disable_clock_power_gating,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index a8fc0fa44db6..ba5c1237fcfe 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 7e1197420873..9b20076e26c0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
}
if (le32_to_cpu(info->ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableDFSBypass);
}
@@ -2044,7 +2044,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
.force_dpm_level = smu8_dpm_force_dpm_level,
.get_power_state_size = smu8_get_power_state_size,
- .powerdown_uvd = smu8_dpm_powerdown_uvd,
.powergate_uvd = smu8_dpm_powergate_uvd,
.powergate_vce = smu8_dpm_powergate_vce,
.powergate_acp = smu8_dpm_powergate_acp,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a..c305ea4ec17d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
}
cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 379012494da5..56423aedf3fa 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
int result = 0;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return -1;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index a3331ffb2daf..1b1c88590156 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t tach_period, crystal_clock_freq;
int result = 0;
- if (!speed)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index f4f9a104d170..915f1b8e4dba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -396,7 +396,6 @@ struct phm_odn_clock_levels {
};
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 227bf0e84a13..c661185753b4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -257,7 +257,6 @@ struct pp_hwmgr_func {
int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr,
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
- int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index 5e43ad2b2956..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
@@ -2540,9 +2540,8 @@ static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 17d2f5bff4a7..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
@@ -2655,9 +2655,8 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
const struct pp_smumgr_func iceland_smu_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index ff6b563ecbf5..bf6d09572cfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2578,9 +2578,8 @@ static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index ac9ec8257f82..38e19e5cad4d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -139,7 +139,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -164,7 +164,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index 5a010cd38303..0d4cbe4113a0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
}
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
@@ -437,7 +401,7 @@ failed:
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- uint32_t ret;
+ int ret;
ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..63e428ceaee4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
@@ -53,8 +53,6 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
- uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 6fe6e6abb5d8..2e21f9d066cb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -3139,9 +3139,8 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
index f9c0f117725d..0bf1bf5528c2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
@@ -60,7 +60,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -90,7 +90,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
index d3ff6a831ed5..e2ba593faa5d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
@@ -68,7 +68,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -98,7 +98,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index a5c95b180672..e3515156d26f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
@@ -192,7 +192,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -223,7 +223,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -256,7 +256,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -306,7 +306,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8ca793c222ff..f51fa265230b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu,
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
-static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
@@ -76,6 +76,10 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,12 +138,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
}
int smu_set_soft_freq_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
+ enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
+ enum smu_clk_type clk_type;
int ret = 0;
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
@@ -307,6 +316,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret;
+
+ if (!smu->ppt_funcs->dpm_set_isp_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->isp_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->isp_gated, !enable);
+
+ return ret;
+}
+
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -408,6 +437,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_ISP:
+ ret = smu_dpm_set_isp_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -473,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
/* Enable restore flag */
smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
- /* set the user dpm power limit */
- if (smu->user_dpm_profile.power_limit) {
- ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ /* set the user dpm power limits */
+ for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
+ if (!smu->user_dpm_profile.power_limits[i])
+ continue;
+ ret = smu_set_power_limit(smu, i,
+ smu->user_dpm_profile.power_limits[i]);
if (ret)
- dev_err(smu->adev->dev, "Failed to set power limit value\n");
+ dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i);
}
/* set the user dpm clock configurations */
@@ -574,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
return true;
}
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
+ ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);
+
+ return ret;
+}
static int smu_sys_get_pp_table(void *handle,
char **table)
@@ -585,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle,
return -EOPNOTSUPP;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
@@ -612,7 +661,8 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- if (!smu_table->hardcode_pptable) {
+ if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
+ kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;
@@ -693,6 +743,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
renoir_set_ppt_funcs(smu);
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
vangogh_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 1):
@@ -738,6 +789,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu_v14_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 2):
@@ -1001,6 +1053,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
+static void smu_update_gpu_addresses(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
+
+ if (pm_status_table->bo)
+ pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
+ if (driver_table->bo)
+ driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
+ if (dummy_read_1_table->bo)
+ dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
+}
+
/**
* smu_alloc_memory_pool - allocate memory pool in the system memory
*
@@ -1024,7 +1091,10 @@ static int smu_alloc_memory_pool(struct smu_context *smu)
memory_pool->size = pool_size;
memory_pool->align = PAGE_SIZE;
- memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+ memory_pool->domain =
+ (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM :
+ AMDGPU_GEM_DOMAIN_GTT;
switch (pool_size) {
case SMU_MEMORY_POOL_SIZE_256_MB:
@@ -1251,27 +1321,39 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
-static bool smu_is_workload_profile_available(struct smu_context *smu,
- u32 profile)
+static void smu_init_power_profile(struct smu_context *smu)
+{
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu_power_profile_mode_get(smu, smu->power_profile_mode);
+}
+
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return;
+
+ set_bit(fea_id, fea_cap->cap_map);
+}
+
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
{
- if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
return false;
- return smu->workload_map && smu->workload_map[profile].valid_mapping;
+
+ return test_bit(fea_id, fea_cap->cap_map);
}
-static void smu_init_power_profile(struct smu_context *smu)
+static void smu_feature_cap_init(struct smu_context *smu)
{
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
- if (smu->is_apu ||
- !smu_is_workload_profile_available(
- smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- else
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- }
- smu_power_profile_mode_get(smu, smu->power_profile_mode);
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
}
static int smu_sw_init(struct amdgpu_ip_block *ip_block)
@@ -1294,6 +1376,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu_init_power_profile(smu);
@@ -1305,6 +1388,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_DELAYED_WORK(&smu->swctf_delayed_work,
smu_swctf_delayed_work_handler);
+ smu_feature_cap_init(smu);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1579,13 +1664,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed system features control!\n");
- return ret;
+ return ret;
+ }
+
+ return smu_enable_thermal_alert(smu);
}
break;
default:
@@ -1680,37 +1769,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
}
- ret = smu_system_features_control(smu, true);
- if (ret) {
- dev_err(adev->dev, "Failed to enable requested dpm features!\n");
- return ret;
- }
-
- smu_init_xgmi_plpd_mode(smu);
-
- ret = smu_feature_get_enabled_mask(smu, &features_supported);
- if (ret) {
- dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
- return ret;
- }
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
-
- if (!smu_is_dpm_running(smu))
- dev_info(adev->dev, "dpm has been disabled\n");
-
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
pcie_gen = 4;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -1746,6 +1804,37 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_system_features_control(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable requested dpm features!\n");
+ return ret;
+ }
+
+ smu_init_xgmi_plpd_mode(smu);
+
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
+ if (!smu_is_dpm_running(smu))
+ dev_info(adev->dev, "dpm has been disabled\n");
+
+ /*
+ * Set initialized values (get from vbios) to dpm tables context such as
+ * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
+ * type of clks.
+ */
+ ret = smu_set_default_dpm_table(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+ return ret;
+ }
+
ret = smu_get_thermal_temperature_range(smu);
if (ret) {
dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
@@ -1788,6 +1877,9 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ smu_update_gpu_addresses(smu);
+
smu->smc_fw_state = SMU_FW_INIT;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -1825,7 +1917,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+ if (amdgpu_sriov_multi_vf_mode(adev)) {
smu->pm_enabled = false;
return 0;
}
@@ -1850,7 +1942,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
- smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
@@ -1932,6 +2023,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
return 0;
@@ -1965,6 +2057,12 @@ static int smu_disable_dpms(struct smu_context *smu)
smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
return 0;
+ /* vangogh s0ix */
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+ adev->in_s0ix)
+ return 0;
+
/*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
@@ -2048,17 +2146,16 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
struct smu_context *smu = adev->powerplay.pp_handle;
int i, ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
smu_dpm_set_vcn_enable(smu, false, i);
+ adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
+ }
smu_dpm_set_jpeg_enable(smu, false);
- smu_dpm_set_vpe_enable(smu, false);
- smu_dpm_set_umsch_mm_enable(smu, false);
-
- adev->vcn.cur_state = AMD_PG_STATE_GATE;
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
+ smu_dpm_set_umsch_mm_enable(smu, false);
if (!smu->pm_enabled)
return 0;
@@ -2116,7 +2213,7 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
int ret;
uint64_t count;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
if (!smu->pm_enabled)
@@ -2152,7 +2249,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
if (!smu->pm_enabled)
@@ -2325,7 +2422,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- dev_err(smu->adev->dev, "Failed to set performance level!");
+ if (ret == -EOPNOTSUPP)
+ dev_info(smu->adev->dev, "set performance level %d not supported",
+ level);
+ else
+ dev_err(smu->adev->dev, "Failed to set performance level %d",
+ level);
return ret;
}
@@ -2400,7 +2502,11 @@ static int smu_switch_power_profile(void *handle,
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
- ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ /* don't switch the active workload when paused */
+ if (smu->pause_workload)
+ ret = 0;
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
@@ -2413,6 +2519,35 @@ static int smu_switch_power_profile(void *handle,
return 0;
}
+static int smu_pause_power_profile(void *handle,
+ bool pause)
+{
+ struct smu_context *smu = handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ int ret;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ smu->pause_workload = pause;
+
+ /* force to bootup default profile */
+ if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu,
+ workload_mask,
+ NULL,
+ 0);
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
@@ -2671,6 +2806,17 @@ const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
.funcs = &smu_ip_funcs,
};
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle)
+{
+ struct smu_context *smu = (struct smu_context *)handle;
+ const struct ras_smu_drv *tmp = NULL;
+ int ret;
+
+ ret = smu_get_ras_smu_drv(smu, &tmp);
+
+ return ret ? NULL : tmp;
+}
+
static int smu_load_microcode(void *handle)
{
struct smu_context *smu = handle;
@@ -2764,6 +2910,9 @@ int smu_get_power_limit(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
+ if (!limit)
+ return -EINVAL;
+
switch (pp_power_type) {
case PP_PWR_TYPE_SUSTAINED:
limit_type = SMU_DEFAULT_PPT_LIMIT;
@@ -2795,12 +2944,15 @@ int smu_get_power_limit(void *handle,
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+ else
+ return -EOPNOTSUPP;
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
@@ -2832,37 +2984,34 @@ int smu_get_power_limit(void *handle,
return ret;
}
-static int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct smu_context *smu = handle;
- uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- limit &= (1<<24)-1;
- if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit)
- return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
-
- if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
- dev_err(smu->adev->dev,
- "New power limit (%d) is out of range [%d,%d]\n",
- limit, smu->min_power_limit, smu->max_power_limit);
- return -EINVAL;
+ if (limit_type == SMU_DEFAULT_PPT_LIMIT) {
+ if (!limit)
+ limit = smu->current_power_limit;
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
+ return -EINVAL;
+ }
}
- if (!limit)
- limit = smu->current_power_limit;
-
if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.power_limit = limit;
+ if (ret)
+ return ret;
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ smu->user_dpm_profile.power_limits[limit_type] = limit;
}
- return ret;
+ return 0;
}
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
@@ -2903,6 +3052,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCLK; break;
case PP_DCLK1:
clk_type = SMU_DCLK1; break;
+ case PP_ISPICLK:
+ clk_type = SMU_ISPICLK;
+ break;
+ case PP_ISPXCLK:
+ clk_type = SMU_ISPXCLK;
+ break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
@@ -3400,17 +3555,12 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
- bool ret = false;
-
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
}
int smu_mode1_reset(struct smu_context *smu)
@@ -3443,6 +3593,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3713,6 +3876,64 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
+{
+ struct smu_context *smu = handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ enum smu_table_id table_id;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
+ return -EOPNOTSUPP;
+
+ table_id = smu_metrics_get_temp_table_id(type);
+
+ if (table_id == SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ /* If the request is to get size alone, return the cached table size */
+ if (!table && tables[table_id].cache.size)
+ return tables[table_id].cache.size;
+
+ if (smu_table_cache_is_valid(&tables[table_id])) {
+ memcpy(table, tables[table_id].cache.buffer,
+ tables[table_id].cache.size);
+ return tables[table_id].cache.size;
+ }
+
+ return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
+}
+
+static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
+{
+ struct smu_context *smu = handle;
+ bool ret = false;
+
+ if (!smu->pm_enabled)
+ return false;
+
+ if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
+ ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);
+
+ return ret;
+}
+
+static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
+}
+
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,
@@ -3734,6 +3955,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table,
.set_pp_table = smu_sys_set_pp_table,
.switch_power_profile = smu_switch_power_profile,
+ .pause_power_profile = smu_pause_power_profile,
/* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.load_firmware = smu_load_microcode,
@@ -3770,6 +3992,9 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_uclk_dpm_states = smu_get_uclk_dpm_states,
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
+ .get_xcp_metrics = smu_sys_get_xcp_metrics,
+ .get_temp_metrics = smu_sys_get_temp_metrics,
+ .temp_metrics_is_supported = smu_temp_metrics_is_supported,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -3916,6 +4141,18 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
+ * @smu: smu_context pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * Return: true if supported, false otherwise.
+ */
+bool smu_reset_sdma_is_supported(struct smu_context *smu)
+{
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+}
+
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
@@ -3925,3 +4162,16 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+bool smu_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
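
A minimal caller-side sketch of how the new capability bitmap is meant to gate the reset paths: support is queried through the smu_feature_cap_test() wrappers added above instead of per-ASIC ppt_funcs checks. example_try_sdma_reset() is a hypothetical helper, not part of this patch.

    static int example_try_sdma_reset(struct smu_context *smu, uint32_t inst_mask)
    {
    	/* smu_reset_sdma_is_supported() wraps SMU_FEATURE_CAP_ID__SDMA_RESET */
    	if (!smu_reset_sdma_is_supported(smu))
    		return -EOPNOTSUPP;

    	return smu_reset_sdma(smu, inst_mask);
    }
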
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3630593bce61..8815fc70b63b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -212,6 +212,7 @@ enum smu_power_src_type {
enum smu_ppt_limit_type {
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
+ SMU_LIMIT_TYPE_COUNT,
};
enum smu_ppt_limit_level {
@@ -231,7 +232,7 @@ enum smu_memory_pool_size {
struct smu_user_dpm_profile {
uint32_t fan_mode;
- uint32_t power_limit;
+ uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];
uint32_t fan_speed_pwm;
uint32_t fan_speed_rpm;
uint32_t flags;
@@ -249,6 +250,14 @@ struct smu_user_dpm_profile {
tables[table_id].domain = d; \
} while (0)
+struct smu_table_cache {
+ void *buffer;
+ size_t size;
+ /* interval in ms */
+ uint32_t interval;
+ unsigned long last_cache_time;
+};
+
struct smu_table {
uint64_t size;
uint32_t align;
@@ -257,6 +266,7 @@ struct smu_table {
void *cpu_addr;
struct amdgpu_bo *bo;
uint32_t version;
+ struct smu_table_cache cache;
};
enum smu_perf_level_designation {
@@ -322,6 +332,9 @@ enum smu_table_id {
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
SMU_TABLE_WIFIBAND,
+ SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ SMU_TABLE_PMFW_SYSTEM_METRICS,
SMU_TABLE_COUNT,
};
@@ -396,12 +409,17 @@ struct smu_dpm_context {
struct smu_dpm_policy_ctxt *dpm_policies;
};
+struct smu_temp_context {
+ const struct smu_temp_funcs *temp_funcs;
+};
+
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
+ atomic_t isp_gated;
atomic_t umsch_mm_gated;
};
@@ -438,9 +456,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -509,6 +529,17 @@ enum smu_fw_status {
*/
#define SMU_WBRF_EVENT_HANDLING_PACE 10
+enum smu_feature_cap_id {
+ SMU_FEATURE_CAP_ID__LINK_RESET = 0,
+ SMU_FEATURE_CAP_ID__SDMA_RESET,
+ SMU_FEATURE_CAP_ID__VCN_RESET,
+ SMU_FEATURE_CAP_ID__COUNT,
+};
+
+struct smu_feature_cap {
+ DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
+};
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -526,10 +557,12 @@ struct smu_context {
struct smu_table_context smu_table;
struct smu_dpm_context smu_dpm;
struct smu_power_context smu_power;
+ struct smu_temp_context smu_temp;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
struct smu_temperature_range thermal_range;
+ struct smu_feature_cap fea_cap;
void *od_settings;
struct smu_umd_pstate_table pstate_table;
@@ -558,6 +591,7 @@ struct smu_context {
/* asic agnostic workload mask */
uint32_t workload_mask;
+ bool pause_workload;
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
@@ -620,6 +654,28 @@ struct smu_context {
struct i2c_adapter;
/**
+ * struct smu_temp_funcs - Callbacks used to get temperature data.
+ */
+struct smu_temp_funcs {
+ /**
+ * @get_temp_metrics: Copy the temperature metrics table of the given
+ *                    type from the SMU into &table.
+ * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: Size of &table
+ */
+ ssize_t (*get_temp_metrics)(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table);
+
+ /**
+ * @temp_metrics_is_supported: Check whether the given temperature metrics type is supported
+ * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: true if supported else false
+ */
+ bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
+
+};
+
+/**
* struct pptable_funcs - Callbacks used to interact with the SMU.
*/
struct pptable_funcs {
@@ -1228,10 +1284,6 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
- /**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
- */
- bool (*mode2_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1251,6 +1303,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * The reset is issued through the gfx device driver.
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1378,6 +1437,11 @@ struct pptable_funcs {
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
/**
+ * @dpm_reset_vcn: message SMU to soft reset vcn instances.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1416,6 +1480,12 @@ struct pptable_funcs {
int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
/**
+ * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+ * management.
+ */
+ int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+ /**
* @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
* management.
*/
@@ -1446,6 +1516,27 @@ struct pptable_funcs {
*/
int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
struct freq_band_range *exclusion_ranges);
+ /**
+ * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
+ * Return: Size of table
+ */
+ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
+ void *table);
+ /**
+ * @ras_send_msg: Send a message with a parameter on behalf of RAS.
+ * @msg: Type of message.
+ * @param: Message parameter.
+ * @read_arg: SMU response (optional).
+ */
+ int (*ras_send_msg)(struct smu_context *smu,
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+
+ /**
+ * @get_ras_smu_drv: Get RAS smu driver interface
+ * Return: ras_smu_drv *
+ */
+ int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
};
typedef enum {
@@ -1589,6 +1680,71 @@ typedef struct {
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type);
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+ default:
+ return SMU_TABLE_COUNT;
+ }
+
+ return SMU_TABLE_COUNT;
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+ unsigned long time)
+{
+ table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+ if (!table->cache.buffer || !table->cache.last_cache_time ||
+ !table->cache.interval || !table->cache.size ||
+ time_after(jiffies,
+ table->cache.last_cache_time +
+ msecs_to_jiffies(table->cache.interval)))
+ return false;
+
+ return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+ enum smu_table_id table_id, size_t size,
+ uint32_t cache_interval)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+ if (!tables[table_id].cache.buffer)
+ return -ENOMEM;
+
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = cache_interval;
+ tables[table_id].cache.size = size;
+
+ return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+ enum smu_table_id table_id)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ if (tables[table_id].cache.buffer) {
+ kfree(tables[table_id].cache.buffer);
+ tables[table_id].cache.buffer = NULL;
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = 0;
+ }
+}
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
@@ -1596,8 +1752,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1608,7 +1765,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
uint32_t min, uint32_t max);
int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
@@ -1637,10 +1794,19 @@ int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *readarg);
#endif
+
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
#endif
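
A hedged sketch of the intended lifecycle of the smu_table_cache helpers declared above. The example_* functions, the SZ_4K size and the 1000 ms interval are assumptions for illustration only.

    static int example_temp_cache_setup(struct smu_context *smu)
    {
    	/* cached copy is treated as fresh for up to 1000 ms */
    	return smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
    				    SZ_4K, 1000);
    }

    static void example_temp_cache_refresh(struct smu_context *smu,
    				       const void *src, size_t len)
    {
    	struct smu_table *table =
    		&smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];

    	/* consumers call smu_table_cache_is_valid() before reusing the copy */
    	memcpy(table->cache.buffer, src, min(len, table->cache.size));
    	smu_table_cache_update_time(table, jiffies);
    }
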
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_MEM_PSTATE_LEVELS 4
+#define ISP_ALL_TILES_MASK 0x7FF
typedef struct {
uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
new file mode 100644
index 000000000000..dd30d96e1ca2
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PMFW_H
+#define SMU_13_0_12_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+#define NUM_TDP_GROUPS 4
+#define NUM_SOC_P2S_TABLES 6
+#define NUM_GFX_P2S_TABLES 8
+#define NUM_PSM_DIDT_THRESHOLDS 3
+#define NUM_XVMIN_VMIN_THRESHOLDS 3
+
+#define PRODUCT_MODEL_NUMBER_LEN 20
+#define PRODUCT_NAME_LEN 64
+#define PRODUCT_SERIAL_LEN 20
+#define PRODUCT_MANUFACTURER_NAME_LEN 32
+#define PRODUCT_FRU_ID_LEN 32
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_FCLK = 1,
+/*2*/ FEATURE_DPM_GFXCLK = 2,
+/*3*/ FEATURE_DPM_LCLK = 3,
+/*4*/ FEATURE_DPM_SOCCLK = 4,
+/*5*/ FEATURE_DPM_UCLK = 5,
+/*6*/ FEATURE_DPM_VCN = 6,
+/*7*/ FEATURE_DPM_XGMI = 7,
+/*8*/ FEATURE_DS_FCLK = 8,
+/*9*/ FEATURE_DS_GFXCLK = 9,
+/*10*/ FEATURE_DS_LCLK = 10,
+/*11*/ FEATURE_DS_MP0CLK = 11,
+/*12*/ FEATURE_DS_MP1CLK = 12,
+/*13*/ FEATURE_DS_MPIOCLK = 13,
+/*14*/ FEATURE_DS_SOCCLK = 14,
+/*15*/ FEATURE_DS_VCN = 15,
+/*16*/ FEATURE_APCC_DFLL = 16,
+/*17*/ FEATURE_APCC_PLUS = 17,
+/*18*/ FEATURE_PPT = 18,
+/*19*/ FEATURE_TDC = 19,
+/*20*/ FEATURE_THERMAL = 20,
+/*21*/ FEATURE_SOC_PCC = 21,
+/*22*/ FEATURE_PROCHOT = 22,
+/*23*/ FEATURE_FDD_AID_HBM = 23,
+/*24*/ FEATURE_FDD_AID_SOC = 24,
+/*25*/ FEATURE_FDD_XCD_EDC = 25,
+/*26*/ FEATURE_FDD_XCD_XVMIN = 26,
+/*27*/ FEATURE_FW_CTF = 27,
+/*28*/ FEATURE_SMU_CG = 28,
+/*29*/ FEATURE_PSI7 = 29,
+/*30*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 30,
+/*31*/ FEATURE_SOC_DC_RTC = 31,
+/*32*/ FEATURE_GFX_DC_RTC = 32,
+/*33*/ FEATURE_DVM_MIN_PSM = 33,
+/*34*/ FEATURE_PRC = 34,
+/*35*/ FEATURE_PSM_SQ_THROTTLER = 35,
+/*36*/ FEATURE_PIT = 36,
+/*37*/ FEATURE_DVO = 37,
+/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_HROM_EN = 40,
+
+/*41*/ NUM_FEATURES = 41
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_RESERVED,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ GFX_GUARDBAND_OFFSET_0,
+ GFX_GUARDBAND_OFFSET_1,
+ GFX_GUARDBAND_OFFSET_2,
+ GFX_GUARDBAND_OFFSET_3,
+ GFX_GUARDBAND_OFFSET_4,
+ GFX_GUARDBAND_OFFSET_5,
+ GFX_GUARDBAND_OFFSET_6,
+ GFX_GUARDBAND_OFFSET_7,
+ GFX_GUARDBAND_OFFSET_COUNT
+} GFX_GUARDBAND_OFFSET_e;
+
+typedef enum {
+ GFX_DVM_MARGINHI_0,
+ GFX_DVM_MARGINHI_1,
+ GFX_DVM_MARGINHI_2,
+ GFX_DVM_MARGINHI_3,
+ GFX_DVM_MARGINHI_4,
+ GFX_DVM_MARGINHI_5,
+ GFX_DVM_MARGINHI_6,
+ GFX_DVM_MARGINHI_7,
+ GFX_DVM_MARGINLO_0,
+ GFX_DVM_MARGINLO_1,
+ GFX_DVM_MARGINLO_2,
+ GFX_DVM_MARGINLO_3,
+ GFX_DVM_MARGINLO_4,
+ GFX_DVM_MARGINLO_5,
+ GFX_DVM_MARGINLO_6,
+ GFX_DVM_MARGINLO_7,
+ GFX_DVM_MARGIN_COUNT
+} GFX_DVM_MARGIN_e;
+
+typedef enum {
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum {
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_VDDCR_VDD0_TEMP,
+ SVI_VDDCR_VDD1_TEMP,
+ SVI_VDDCR_VDD2_TEMP,
+ SVI_VDDCR_VDD3_TEMP,
+ SVI_VDDCR_SOC_A_TEMP,
+ SVI_VDDCR_SOC_C_TEMP,
+ SVI_VDDCR_SOCIO_A_TEMP,
+ SVI_VDDCR_SOCIO_C_TEMP,
+ SVI_VDD_085_HBM_TEMP,
+ SVI_VDDCR_11_HBM_B_TEMP,
+ SVI_VDDCR_11_HBM_D_TEMP,
+ SVI_VDD_USR_TEMP,
+ SVI_VDDIO_11_E32_TEMP,
+ SVI_MAX_TEMP_ENTRIES, // 13
+} SVI_TEMP_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x15
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[40];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+
+ uint32_t AidTemperature[4];
+ uint32_t XcdTemperature[8];
+ uint32_t HbmTemperature[8];
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
+#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; // Defaults to 0.
+ uint16_t NodeIdentifier; // Unique identifier of each node on the system.
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius
+ int16_t spare[7];
+
+ //NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+} SystemMetricsTable_t;
+#pragma pack(pop)
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimitPpt;
+ uint64_t AccGfxclkBelowHostLimitThm;
+ uint64_t AccGfxclkBelowHostLimitTotal;
+ uint64_t AccGfxclkLowUtilization;
+} VfMetricsTable_t;
+
+/* FRU product information */
+typedef struct __attribute__((packed, aligned(4))) {
+ uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
+ uint8_t Name[PRODUCT_NAME_LEN];
+ uint8_t Serial[PRODUCT_SERIAL_LEN];
+ uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
+ uint8_t FruId[PRODUCT_FRU_ID_LEN];
+} FRUProductInfo_t;
+
+#pragma pack(push, 4)
+typedef struct {
+ //FRU PRODUCT INFO
+ FRUProductInfo_t ProductInfo;
+
+ //POWER
+ uint32_t MaxSocketPowerLimit;
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
+
+ //Node Power Limit
+ uint32_t MaxNodePowerLimit;
+
+ // PPT1 Configuration
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
+#endif
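
A hedged sketch (not from the patch) of consuming the SystemMetricsTable_t defined above, skipping the 0xFFFF sentinel that marks unpopulated temperature slots. example_dump_system_temps() is a hypothetical helper.

    static void example_dump_system_temps(const SystemMetricsTable_t *m)
    {
    	int i;

    	for (i = 0; i < SYSTEM_TEMP_MAX_ENTRIES; i++) {
    		/* unused slots are reported as 0xFFFF per the comments above */
    		if (m->SystemTemperatures[i] == (int16_t)0xFFFF)
    			continue;
    		pr_debug("system temp[%d] = %d C\n", i,
    			 m->SystemTemperatures[i]);
    	}
    }
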
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
new file mode 100644
index 000000000000..d09b6ae9827e
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PPSMC_H
+#define SMU_13_0_12_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_MSG_PrepareForDriverUnload 0x34
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_MSG_ClearMcaOnRead 0x39
+#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
+#define PPSMC_MSG_McaBankCeDumpDW 0x3B
+#define PPSMC_MSG_SelectPLPDMode 0x40
+#define PPSMC_MSG_PmLogReadSample 0x41
+#define PPSMC_MSG_PmLogGetTableVersion 0x42
+#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
+#define PPSMC_MSG_SetPhaseDetectCSBWThreshold 0x45
+#define PPSMC_MSG_SetPhaseDetectFreqHigh 0x46
+#define PPSMC_MSG_SetPhaseDetectFreqLow 0x47
+#define PPSMC_MSG_SetPhaseDetectDownHysterisis 0x48
+#define PPSMC_MSG_SetPhaseDetectAlphaX1e6 0x49
+#define PPSMC_MSG_SetPhaseDetectOnOff 0x4A
+#define PPSMC_MSG_GetPhaseDetectResidency 0x4B
+#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_MSG_GetRasTableVersion 0x4E
+#define PPSMC_MSG_GetBadPageCount 0x50
+#define PPSMC_MSG_GetBadPageMcaAddress 0x51
+#define PPSMC_MSG_SetTimestamp 0x53
+#define PPSMC_MSG_SetTimestampHi 0x54
+#define PPSMC_MSG_GetTimestamp 0x55
+#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57
+#define PPSMC_MSG_EraseRasTable 0x58
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
+#define PPSMC_MSG_GetSystemMetricsTable 0x5C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
+#define PPSMC_MSG_ResetVCN 0x5E
+#define PPSMC_MSG_SetFastPptLimit 0x5F
+#define PPSMC_MSG_GetFastPptLimit 0x60
+#define PPSMC_Message_Count 0x61
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
+//PLPD modes
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
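
An illustrative decode of the PPSMC response codes defined above into errno values; the helper name and the exact mapping are assumptions, not part of the patch.

    static int example_ppsmc_result_to_errno(PPSMC_Result res)
    {
    	switch (res) {
    	case PPSMC_Result_OK:
    		return 0;
    	case PPSMC_Result_CmdRejectedBusy:
    		return -EBUSY;
    	case PPSMC_Result_UnknownCmd:
    		return -EOPNOTSUPP;
    	default:	/* Failed / CmdRejectedPrereq */
    		return -EIO;
    	}
    }
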
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 0f96b8c59a0e..01790a927930 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -34,6 +34,8 @@
#define NUM_PCIE_BITRATES 4
#define NUM_XGMI_BITRATES 4
#define NUM_XGMI_WIDTHS 3
+#define NUM_SOC_P2S_TABLES 3
+#define NUM_TDP_GROUPS 4
typedef enum {
/*0*/ FEATURE_DATA_CALCULATION = 0,
@@ -80,8 +82,10 @@ typedef enum {
/*41*/ FEATURE_CXL_QOS = 41,
/*42*/ FEATURE_SOC_DC_RTC = 42,
/*43*/ FEATURE_GFX_DC_RTC = 43,
+/*44*/ FEATURE_DVM_MIN_PSM = 44,
+/*45*/ FEATURE_PRC = 45,
-/*44*/ NUM_FEATURES = 44
+/*46*/ NUM_FEATURES = 46
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -123,8 +127,9 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xE
+#define SMU_METRICS_TABLE_VERSION 0x11
+// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -234,8 +239,15 @@ typedef struct __attribute__((packed, aligned(4))) {
//PCIE BW Data and error count
uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
-} MetricsTableX_t;
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+} MetricsTableV0_t;
+
+// Metrics table for smu_v13_0_6 APUs
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -326,15 +338,134 @@ typedef struct __attribute__((packed, aligned(4))) {
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[32];
-} MetricsTableA_t;
+} MetricsTableV1_t;
+
+// Metrics table for smu_v13_0_12
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
-#define SMU_VF_METRICS_TABLE_VERSION 0x3
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
+} MetricsTableV2_t;
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+ // General info
+ uint32_t pldmVersion[2];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
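
With the metrics structs renamed to MetricsTableV0_t/V1_t and the V2 variant added above, a ppt implementation can size one buffer for whichever layout the firmware reports. A hedged sketch; max3() is from linux/minmax.h and the helper name is an assumption.

    static size_t example_metrics_table_size(void)
    {
    	/* large enough for any of the three supported layouts */
    	return max3(sizeof(MetricsTableV0_t),
    		    sizeof(MetricsTableV1_t),
    		    sizeof(MetricsTableV2_t));
    }
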
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 147bfb12fd75..63a088ef7169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -92,9 +92,11 @@
#define PPSMC_MSG_McaBankCeDumpDW 0x3B
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
-#define PPSMC_MSG_SelectPstatePolicy 0x44
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_MSG_ResetVCN 0x5B
+#define PPSMC_Message_Count 0x5C
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index e4cd6a0d13da..9b71a8afdd35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -273,10 +273,22 @@
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
- __SMU_DUMMY_MAP(SelectPstatePolicy), \
+ __SMU_DUMMY_MAP(SetThrottlingPolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
- __SMU_DUMMY_MAP(ResetSDMA),
+ __SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
+ __SMU_DUMMY_MAP(GetStaticMetricsTable), \
+ __SMU_DUMMY_MAP(GetSystemMetricsTable), \
+ __SMU_DUMMY_MAP(GetRASTableVersion), \
+ __SMU_DUMMY_MAP(GetBadPageCount), \
+ __SMU_DUMMY_MAP(GetBadPageMcaAddr), \
+ __SMU_DUMMY_MAP(SetTimestamp), \
+ __SMU_DUMMY_MAP(GetTimestamp), \
+ __SMU_DUMMY_MAP(GetBadPageIpid), \
+ __SMU_DUMMY_MAP(EraseRasTable), \
+ __SMU_DUMMY_MAP(SetFastPptLimit), \
+ __SMU_DUMMY_MAP(GetFastPptLimit),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -303,6 +315,8 @@ enum smu_clk_type {
SMU_MCLK,
SMU_PCIE,
SMU_LCLK,
+ SMU_ISPICLK,
+ SMU_ISPXCLK,
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
@@ -355,6 +369,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(DS_FCLK), \
__SMU_DUMMY_MAP(DS_MP1CLK), \
__SMU_DUMMY_MAP(DS_MP0CLK), \
+ __SMU_DUMMY_MAP(DS_MPIOCLK), \
__SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \
__SMU_DUMMY_MAP(DPM_GFX_PACE), \
__SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
@@ -451,7 +466,9 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(APT_PF_DCS), \
__SMU_DUMMY_MAP(GFX_EDC_XVMIN), \
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
- __SMU_DUMMY_MAP(FAN_ABNORMAL),
+ __SMU_DUMMY_MAP(FAN_ABNORMAL), \
+ __SMU_DUMMY_MAP(PIT), \
+ __SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
@@ -463,6 +480,7 @@ enum smu_feature_mask {
/* Message category flags */
#define SMU_MSG_VF_FLAG (1U << 0)
#define SMU_MSG_RAS_PRI (1U << 1)
+#define SMU_MSG_NO_PRECHECK (1U << 2)
/* Firmware capability flags */
#define SMU_FW_CAP_RAS_PRI (1U << 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ae3563d71fa0..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -53,6 +53,10 @@
#define SMU_13_VCLK_SHIFT 16
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];
@@ -107,6 +111,8 @@ struct smu_13_0_dpm_context {
struct smu_13_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -157,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -178,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -221,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -303,5 +295,8 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
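
A small worked example of the Q10.10 fixed-point helpers added above: a raw value of (1375 << 10) | 672 encodes 1375.65625, so SMUQ10_TO_UINT() yields 1375 and SMUQ10_ROUND() yields 1376 because the fraction (672) is >= 0x200. example_q10_round() is illustrative only.

    static inline uint32_t example_q10_round(uint64_t raw)
    {
    	/* round a Q10.10 PMFW metric to the nearest integer */
    	return SMUQ10_ROUND(raw);
    }
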
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
index 251ed011b3b0..251ed011b3b0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 8aa61a9f7778..4fff78da81ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
+ if (!speed || speed > UINT_MAX/8)
+ return -EINVAL;
+
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
@@ -1344,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
@@ -1742,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -1753,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void arcturus_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1894,7 +1882,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
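
A standalone restatement (not driver code) of why the new bounds check in arcturus_set_fan_speed_rpm() matters: with crystal_clock_freq = 2500 the numerator 60 * 2500 * 10000 = 1,500,000,000 fits in 32 bits, but 8 * speed wraps for speed > UINT_MAX / 8 and a zero speed would divide by zero.

    static int example_tach_period(uint32_t speed, uint32_t *tach_period)
    {
    	const uint32_t crystal_clock_freq = 2500;

    	if (!speed || speed > UINT_MAX / 8)
    		return -EINVAL;

    	*tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
    	return 0;
    }
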
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 9548bd3c624b..55401e6b2b0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
- int ret = 0, size = 0;
+ int ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0;
int i;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
}
- return size;
+ return size - start_offset;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..7c9f77124ab2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, levels, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_DCEFCLK:
ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
- return size;
+ return size - start_offset;
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
@@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
- return size;
+ return size - start_offset;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
@@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
} else {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
- return size;
+ return size - start_offset;
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
@@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int navi10_force_clk_levels(struct smu_context *smu,
@@ -2444,7 +2445,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2455,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
- (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
- pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
-
- if (ret)
- return ret;
-
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (pptable->PcieLaneCount[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+ pptable->PcieLaneCount[i] > pcie_width_cap) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_gen_cap << 8;
+ smu_pcie_arg |= pcie_width_cap;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static inline void navi10_dump_od_table(struct smu_context *smu,
@@ -2885,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
dummy_table += 0x1000;
}
- amdgpu_asic_flush_hdp(smu->adev, NULL);
+ amdgpu_hdp_flush(smu->adev, NULL);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
@@ -3142,10 +3146,10 @@ static int navi10_i2c_control_init(struct smu_context *smu)
control->quirks = &navi10_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -3153,27 +3157,12 @@ static int navi10_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void navi10_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
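
Both PCIe hunks above share one change of approach: rather than unconditionally sending SMU_MSG_OverridePcieParameters for every link level, the loop now checks each level against the platform caps and only messages the firmware for levels that exceed them, breaking out (and returning the error) on the first failure; the navi10 variant writes the caps themselves into the message, the sienna_cichlid one the clamped table entries. The argument packs level, generation and lane count into a single dword. A rough sketch of the loop under assumed variable names:

	/*
	 * Illustrative only: pcie_gen[] / pcie_lane[] stand for the pptable
	 * entries, gen_cap / width_cap for the platform limits.
	 */
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		if (pcie_gen[i] <= gen_cap && pcie_lane[i] <= width_cap)
			continue;	/* already within the caps: no message */

		pcie_gen[i] = min(pcie_gen[i], gen_cap);
		pcie_lane[i] = min(pcie_lane[i], width_cap);

		/* level in bits 16..31, gen in bits 8..15, lane count in bits 0..7 */
		smu_pcie_arg = (i << 16) | (pcie_gen[i] << 8) | pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg, NULL);
		if (ret)
			break;		/* caller returns ret */
	}
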
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..774283ac7827 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
- int i, size = 0, ret = 0;
+ int i, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
@@ -2145,7 +2146,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t min_gen_speed, max_gen_speed;
uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2172,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16 |
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+ table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+ smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
@@ -2644,10 +2649,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
control->quirks = &sienna_cichlid_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
/* assign the buses used for the FRU EEPROM and RAS EEPROM */
@@ -2656,27 +2661,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -3089,11 +3079,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3214,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 189c6a32b6bd..b0d6487171d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -227,6 +227,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
case IP_VERSION(11, 0, 12):
@@ -471,10 +472,11 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
- IP_VERSION(11, 5, 0) ?
- sizeof(struct smu_11_5_power_context) :
- sizeof(struct smu_11_0_power_context);
+ u32 ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ size_t size = ((ip_version == IP_VERSION(11, 5, 0)) ||
+ (ip_version == IP_VERSION(11, 5, 2))) ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
@@ -731,6 +733,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
*/
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return 0;
@@ -1019,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
- return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+ int ret = 0;
+
+ if (smu->smu_table.thermal_controller_type)
+ ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+
+ return ret;
}
static uint16_t convert_to_vddc(uint8_t vid)
@@ -1110,6 +1118,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -1200,7 +1209,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
- if (speed == 0)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
@@ -2055,45 +2064,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
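
The new `speed > UINT_MAX/8` guard in smu_v11_0_set_fan_speed_rpm() (mirrored later in smu_v13_0.c) protects the tach-period arithmetic: the period is derived by dividing a crystal-clock constant by 8 * speed, so an RPM value large enough to wrap 8 * speed in 32-bit unsigned arithmetic would yield a nonsense period instead of an error. A standalone illustration of the guard, with the computation reduced to the part that matters:

#include <stdint.h>
#include <limits.h>
#include <errno.h>

/* Sketch: reject RPM values that would overflow the 8 * speed divisor. */
static int tach_period_from_rpm(uint32_t speed, uint32_t *tach_period)
{
	const uint32_t crystal_clock_freq = 2500;

	if (!speed || speed > UINT_MAX / 8)
		return -EINVAL;

	*tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	return 0;
}
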
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..9626da2dba58 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_print_clk_levels(struct smu_context *smu,
@@ -666,8 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -679,34 +679,29 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_CCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
break;
case SMU_SOCCLK:
/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
@@ -786,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_common_print_clk_levels(struct smu_context *smu,
@@ -2224,6 +2219,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+ if (adev->in_s0ix)
+ return 0;
+
/* allow message will be sent after enable message on Vangogh*/
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
@@ -2315,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
uint32_t ppt_limit;
int ret = 0;
@@ -2352,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu,
}
static int vangogh_get_ppt_limit(struct smu_context *smu,
- uint32_t *ppt_limit,
- enum smu_ppt_limit_type type,
- enum smu_ppt_limit_level level)
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
if (!power_context)
return -EOPNOTSUPP;
@@ -2406,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu,
smu->current_power_limit = ppt_limit;
break;
case SMU_FAST_PPT_LIMIT:
- ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
if (ppt_limit > power_context->max_fast_ppt_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 37d82a71a2d7..eaa9ea162f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -470,7 +470,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
uint32_t min = 0, max = 0;
- uint32_t ret = 0;
+ int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMinGfxclkFrequency,
@@ -494,10 +494,9 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -507,31 +506,28 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMinGfxclkFrequency,
- 0, &min);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMaxGfxclkFrequency,
- 0, &max);
- if (ret)
- return ret;
- size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
break;
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
- max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sysfs_emit_at(buf, size, "OD_SCLK\n");
- size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
- size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
- }
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
@@ -555,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
}
- return size;
+ return size - start_offset;
case SMU_SOCCLK:
count = NUM_SOCCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
@@ -612,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
@@ -1285,6 +1281,12 @@ static int renoir_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = renoir_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = renoir_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 7f3493b6c53c..51f1fa9789ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f6b029354327..18d5d0704509 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk;
+
+ if (amdgpu_sriov_vf(smu->adev)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+
+ if (min) {
+ if (!min_clk)
+ return -ENODATA;
+ *min = min_clk;
+ }
+ if (max) {
+ if (!max_clk)
+ return -ENODATA;
+ *max = max_clk;
+ }
+
+ } else {
+ return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ }
+
+ return 0;
+}
+
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -1270,6 +1325,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1338,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1483,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1577,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
control->quirks = &aldebaran_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- i2c_del_adapter(control);
-
- return res;
}
static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1717,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
@@ -1732,7 +1785,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = 0;
/* Valid power data is available only from primary die */
if (aldebaran_is_primary(smu)) {
@@ -1977,11 +2029,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2078,7 +2125,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.get_bamaco_support = aldebaran_get_bamaco_support,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
@@ -2087,7 +2134,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
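
aldebaran_get_dpm_ultimate_freq() above adds an SR-IOV-only path: under virtualization the function serves the min/max straight from the cached DPM tables rather than falling through to smu_v13_0_get_dpm_ultimate_freq(), and it returns -ENODATA when a requested bound was never populated. The optional out-parameter handling is the reusable part; a small sketch under assumed names:

/* Hedged sketch: serve cached bounds, -ENODATA if a requested bound is unset. */
static int cached_freq_range(const struct my_dpm_table *t,
			     uint32_t *min, uint32_t *max)
{
	if (min) {
		if (!t->min)
			return -ENODATA;
		*min = t->min;
	}

	if (max) {
		if (!t->max)
			return -ENODATA;
		*max = t->max;
	}

	return 0;
}
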
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 7bb45ff6d5c8..a89075e25717 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -267,10 +273,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- adev->pm.fw_version = smu_version;
+ adev->pm.fw_version = smu_version;
/* only for dGPU w/ SMU13*/
if (adev->pm.fw)
@@ -285,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_driver_if_version) {
+ if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
+ if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
@@ -712,18 +716,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -764,18 +756,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1076,56 +1056,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1229,7 +1159,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t tach_period;
int ret;
- if (!speed)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
ret = smu_v13_0_auto_fan_control(smu, 0);
@@ -1321,11 +1251,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
+ smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1378,12 +1308,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
@@ -1650,45 +1580,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2496,7 +2387,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
if (!num_of_levels)
return 0;
@@ -2512,30 +2404,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
}
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
+ return ret;
}
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
@@ -2598,3 +2498,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
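
The AC/DC hunk in smu_v13_0.c above moves the SMU_MSG_ReenableAcDcInterrupt acknowledgement out of interrupt context: smu_v13_0_irq_process() now only calls schedule_work(&smu->interrupt_work), and the new smu_v13_0_interrupt_work() (wired up as the .interrupt_work callback in smu_v13_0_0_ppt.c below) sends the message from process context, where the SMU message path is free to sleep. This is the usual top-half/bottom-half split; a rough sketch with the SMU specifics abstracted into hypothetical helpers:

/* Sketch only: my_ctx, my_send_fw_message and MY_MSG_REENABLE_ACDC are assumed. */
static void my_interrupt_work(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, interrupt_work);

	/* process context: may take mutexes and sleep while talking to firmware */
	my_send_fw_message(ctx, MY_MSG_REENABLE_ACDC);
}

/* at init time: INIT_WORK(&ctx->interrupt_work, my_interrupt_work); */

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_ctx *ctx = data;

	schedule_work(&ctx->interrupt_work);	/* defer the slow acknowledgement */
	return IRQ_HANDLED;
}
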
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3aa705aae4c0..677781060246 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -126,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
@@ -140,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
@@ -149,7 +149,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
- MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0),
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
@@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -836,6 +816,10 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_MEMACTIVITY:
*value = metrics->AverageUclkActivity;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->Vcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->AverageSocketPower << 8;
break;
@@ -962,6 +946,12 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -1205,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1544,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
@@ -2643,11 +2634,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
@@ -2834,10 +2826,10 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_0_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2847,27 +2839,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -3139,6 +3116,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ uint32_t smu_pcie_arg;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3168,7 +3229,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -3219,6 +3280,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
@@ -3232,4 +3294,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_0_workload_map;
smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION;
smu_v13_0_0_set_smu_mailbox_registers(smu);
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(smu->adev))
+ smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
new file mode 100644
index 000000000000..9e635f733fbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v13_0_12_pmfw.h"
+#include "smu_v13_0_6_ppt.h"
+#include "smu_v13_0_12_ppsmc.h"
+#include "smu_v13_0.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_fru_eeprom.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+#include "amdgpu_ras.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
+ [smu_feature] = { 1, (smu_13_0_12_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK))
+
+#define NUM_JPEG_RINGS_FW 10
+#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \
+ (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4)
+
+const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN),
+};
+
+const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
+ MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
+ MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0),
+ MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0),
+ MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0),
+ MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0),
+ MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0),
+ MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0),
+ MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0),
+ MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1),
+ MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1),
+};
+
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table_cache *cache;
+ int ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), 5);
+
+ if (ret)
+ return ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ sizeof(*baseboard_temp_metrics), 50);
+ if (ret)
+ return ret;
+ /* Initialize base board temperature metrics */
+ cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache);
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer;
+ smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache);
+ gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer;
+ smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0);
+
+ return 0;
+}
+
+void smu_v13_0_12_tables_fini(struct smu_context *smu)
+{
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+}
+
+static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ int ret;
+
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
+ if (ret == -EIO) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct amdgpu_fru_info *fru_info;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->fru_info) {
+ adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+ if (!adev->fru_info)
+ return -ENOMEM;
+ }
+
+ fru_info = adev->fru_info;
+ strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber,
+ sizeof(fru_info->product_number));
+ strscpy(fru_info->product_name, static_metrics->ProductInfo.Name,
+ sizeof(fru_info->product_name));
+ strscpy(fru_info->serial, static_metrics->ProductInfo.Serial,
+ sizeof(fru_info->serial));
+ strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName,
+ sizeof(fru_info->manufacturer_name));
+ strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId,
+ sizeof(fru_info->fru_id));
+
+ return 0;
+}
+
+int smu_v13_0_12_get_max_metrics_size(void)
+{
+ return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
+}
+
+size_t smu_v13_0_12_get_system_metrics_size(void)
+{
+ return sizeof(SystemMetricsTable_t);
+}
+
+static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint16_t max_speed;
+ uint8_t max_width;
+ int ret;
+
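+ /* Metrics table versions >= 0x13 expose the max XGMI width/bitrate in the
+  * static metrics; older versions fall back to the current link values from
+  * the dynamic metrics table.
+  */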
+ if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ ret = 0;
+ } else {
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (!ret) {
+ max_width = (uint8_t)metrics->XgmiWidth;
+ max_speed = (uint16_t)metrics->XgmiBitrate;
+ }
+ }
+ if (!ret)
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
+}
+
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t table_version;
+ int ret, i, n;
+
+ if (!pptable->Init) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]);
+ }
+
+ /* use AID0 serial number by default */
+ pptable->PublicSerialNumber_AID =
+ static_metrics->PublicSerialNumber_AID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
+ ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
+ if (ret)
+ return ret;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ }
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ pptable->MaxNodePowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxNodePowerLimit);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
+ static_metrics->PPT1Max) {
+ pptable->PPT1Max = static_metrics->PPT1Max;
+ pptable->PPT1Min = static_metrics->PPT1Min;
+ pptable->PPT1Default = static_metrics->PPT1Default;
+ }
+ smu_v13_0_12_init_xgmi_data(smu, static_metrics);
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ int xcc_id;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ xcc_id = GET_INST(GC, 0);
+ *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_CURR_SOCKETPOWER:
+ *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table *sys_table;
+ int ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ if (smu_table_cache_is_valid(sys_table))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export system metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_hdp_invalidate(smu->adev, NULL);
+ smu_table_cache_update_time(sys_table, jiffies);
+ memcpy(sys_table->cache.buffer, table->cpu_addr,
+ smu_v13_0_12_get_system_metrics_size());
+
+ return 0;
+}
+
+static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type)
+{
+ switch (type) {
+ case NODE_TEMP_RETIMER:
+ return AMDGPU_RETIMER_X_TEMP;
+ case NODE_TEMP_IBC_TEMP:
+ return AMDGPU_OAM_X_IBC_TEMP;
+ case NODE_TEMP_IBC_2_TEMP:
+ return AMDGPU_OAM_X_IBC_2_TEMP;
+ case NODE_TEMP_VDD18_VR_TEMP:
+ return AMDGPU_OAM_X_VDD18_VR_TEMP;
+ case NODE_TEMP_04_HBM_B_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_B_VR_TEMP;
+ case NODE_TEMP_04_HBM_D_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_D_VR_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type)
+{
+ switch (type) {
+ case SVI_VDDCR_VDD0_TEMP:
+ return AMDGPU_VDDCR_VDD0_TEMP;
+ case SVI_VDDCR_VDD1_TEMP:
+ return AMDGPU_VDDCR_VDD1_TEMP;
+ case SVI_VDDCR_VDD2_TEMP:
+ return AMDGPU_VDDCR_VDD2_TEMP;
+ case SVI_VDDCR_VDD3_TEMP:
+ return AMDGPU_VDDCR_VDD3_TEMP;
+ case SVI_VDDCR_SOC_A_TEMP:
+ return AMDGPU_VDDCR_SOC_A_TEMP;
+ case SVI_VDDCR_SOC_C_TEMP:
+ return AMDGPU_VDDCR_SOC_C_TEMP;
+ case SVI_VDDCR_SOCIO_A_TEMP:
+ return AMDGPU_VDDCR_SOCIO_A_TEMP;
+ case SVI_VDDCR_SOCIO_C_TEMP:
+ return AMDGPU_VDDCR_SOCIO_C_TEMP;
+ case SVI_VDD_085_HBM_TEMP:
+ return AMDGPU_VDD_085_HBM_TEMP;
+ case SVI_VDDCR_11_HBM_B_TEMP:
+ return AMDGPU_VDDCR_11_HBM_B_TEMP;
+ case SVI_VDDCR_11_HBM_D_TEMP:
+ return AMDGPU_VDDCR_11_HBM_D_TEMP;
+ case SVI_VDD_USR_TEMP:
+ return AMDGPU_VDD_USR_TEMP;
+ case SVI_VDDIO_11_E32_TEMP:
+ return AMDGPU_VDDIO_11_E32_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type)
+{
+ switch (type) {
+ case SYSTEM_TEMP_UBB_FPGA:
+ return AMDGPU_UBB_FPGA_TEMP;
+ case SYSTEM_TEMP_UBB_FRONT:
+ return AMDGPU_UBB_FRONT_TEMP;
+ case SYSTEM_TEMP_UBB_BACK:
+ return AMDGPU_UBB_BACK_TEMP;
+ case SYSTEM_TEMP_UBB_OAM7:
+ return AMDGPU_UBB_OAM7_TEMP;
+ case SYSTEM_TEMP_UBB_IBC:
+ return AMDGPU_UBB_IBC_TEMP;
+ case SYSTEM_TEMP_UBB_UFPGA:
+ return AMDGPU_UBB_UFPGA_TEMP;
+ case SYSTEM_TEMP_UBB_OAM1:
+ return AMDGPU_UBB_OAM1_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_HSC:
+ return AMDGPU_OAM_0_1_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_2_3_HSC:
+ return AMDGPU_OAM_2_3_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_HSC:
+ return AMDGPU_OAM_4_5_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_6_7_HSC:
+ return AMDGPU_OAM_6_7_HSC_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_0V72_VR:
+ return AMDGPU_UBB_FPGA_0V72_VR_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_3V3_VR:
+ return AMDGPU_UBB_FPGA_3V3_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR:
+ return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR:
+ return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_0V9_VR:
+ return AMDGPU_RETIMER_0_1_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_0V9_VR:
+ return AMDGPU_RETIMER_4_5_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_2_3_0V9_VR:
+ return AMDGPU_RETIMER_2_3_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_6_7_0V9_VR:
+ return AMDGPU_RETIMER_6_7_0V9_VR_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR:
+ return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR:
+ return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP;
+ case SYSTEM_TEMP_IBC_HSC:
+ return AMDGPU_IBC_HSC_TEMP;
+ case SYSTEM_TEMP_IBC:
+ return AMDGPU_IBC_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ if (smu->adev->gmc.xgmi.physical_node_id == 0 &&
+ smu->adev->gmc.xgmi.num_physical_nodes > 1 &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)))
+ return true;
+ break;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS));
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ return -EOPNOTSUPP;
+
+ if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+ *value = pptable->MaxNodePowerLimit;
+ return 0;
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ *value = SMUQ10_ROUND(metrics->NodePowerLimit);
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ *value = SMUQ10_ROUND(metrics->NodePower);
+ break;
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *data_table;
+ struct smu_table *sys_table;
+ int ret, sensor_type;
+ u32 idx, sensors;
+ ssize_t size;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ /* Initialize base board temperature metrics */
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS];
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*baseboard_temp_metrics);
+ } else {
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];
+ gpuboard_temp_metrics =
+ (struct amdgpu_gpuboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*gpuboard_temp_metrics);
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+ smu_table_cache_update_time(data_table, jiffies);
+
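+ /* Each packed entry carries the sensor type in bits [31:24] and the temperature in bits [23:0] */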
+ if (type == SMU_TEMP_METRIC_GPUBOARD) {
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->NodeTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_node_sensor_type(sensors);
+ gpuboard_temp_metrics->node_temp[idx] =
+ ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+
+ idx = 0;
+
+ for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->VrTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors);
+ gpuboard_temp_metrics->vr_temp[idx] =
+ ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ } else if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) {
+ if (metrics->SystemTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_system_sensor_type(sensors);
+ baseboard_temp_metrics->system_temp[idx] =
+ ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF;
+ baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ }
+
+ memcpy(table, data_table->cache.buffer, size);
+
+ return size;
+}
+
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
+{
+ const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ MetricsTable_t *metrics;
+ int inst, j, k, idx;
+ u32 inst_mask;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instance */
+ inst = GET_INST(VCN, k);
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(metrics->
+ JpegBusy[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ metrics->VclkFrequency[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ metrics->DclkFrequency[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ metrics->SocclkFrequency[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(metrics->UclkFrequency);
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]);
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+
+ return sizeof(*xcp_metrics);
+}
+
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j;
+ u8 num_jpeg_rings_gpu_metrics;
+ MetricsTable_t *metrics;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+
+ gpu_metrics->temperature_hotspot =
+ SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ /* Individual HBM stack temperature is not reported */
+ gpu_metrics->temperature_mem =
+ SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc =
+ SMUQ10_ROUND(metrics->MaxVrTemperature);
+
+ gpu_metrics->average_gfx_activity =
+ SMUQ10_ROUND(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity =
+ SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+ gpu_metrics->curr_socket_power =
+ SMUQ10_ROUND(metrics->SocketPower);
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ for (i = 0; i < MAX_GFX_CLKS; i++) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+
+ if (i < MAX_CLKS) {
+ gpu_metrics->current_socclk[i] =
+ SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] =
+ SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ gpu_metrics->current_dclk0[i] =
+ SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ }
+ }
+ }
+
+ gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+ gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+ gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+ gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+ gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+ /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
+ gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+
+ gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth;
+ gpu_metrics->pcie_link_speed =
+ pcie_gen_to_speed(metrics->PCIeLinkSpeed);
+ gpu_metrics->pcie_bandwidth_acc =
+ SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc;
+ gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] =
+ SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ gpu_metrics->xgmi_write_data_acc[j] =
+ SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[j] = ret;
+ }
+
+ num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+
+ for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) +
+ j] =
+ SMUQ10_ROUND(
+ metrics->JpegBusy[(inst *
+ NUM_JPEG_RINGS_FW) +
+ j]);
+ }
+ gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ }
+
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] =
+ SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->gfx_busy_acc[i] =
+ SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics
+ ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics
+ ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ }
+
+ gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
+ gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
+
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
+}
+
+const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
+ .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v13_0_12_get_temp_metrics,
+};
+
+static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetRASTableVersion, 0, table_version);
+}
+
+static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count,
+ uint32_t timeout)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint64_t end, now;
+ int ret = 0;
+
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ end = now + timeout;
+ do {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageCount, 0, count);
+ /* -EBUSY means the eeprom is not ready yet; keep retrying until the timeout expires */
+ if (ret != -EBUSY)
+ return ret;
+ mdelay(10);
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ } while (now < end);
+
+ dev_err(adev->dev,
+ "smu get bad page count timeout!\n");
+ return ret;
+}
+
+static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
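+ /* The message parameter is 32 bits wide, so only the low 32 bits of the timestamp are sent */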
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetTimestamp, (uint32_t)timestamp, 0);
+}
+
+static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetTimestamp, index, &temp);
+ if (!ret)
+ *timestamp = temp;
+
+ return ret;
+}
+
+static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_ipid_lo, temp_ipid_high;
+ int ret;
+
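+ /* The argument packs the bad page index in bits [15:0]; bits [31:16]
+  * select the low (1) or high (2) 32 bits of the IPID.
+  */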
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high);
+ if (!ret)
+ *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo;
+ return ret;
+}
+
+static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EraseRasTable, 0, result);
+}
+
+static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_addr_lo, temp_addr_high;
+ int ret;
+
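+ /* Same encoding as the IPID query: index in bits [15:0], 1 or 2 in
+  * bits [31:16] to select the low or high 32 bits of the MCA address.
+  */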
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high);
+ if (!ret)
+ *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo;
+ return ret;
+}
+
+static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = {
+ .get_ras_table_version = smu_v13_0_12_get_ras_table_version,
+ .get_badpage_count = smu_v13_0_12_get_badpage_count,
+ .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr,
+ .set_timestamp = smu_v13_0_12_set_timestamp,
+ .get_timestamp = smu_v13_0_12_get_timestamp,
+ .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid,
+ .erase_ras_table = smu_v13_0_12_erase_ras_table,
+};
+
+static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!flags)
+ return;
+
+ *flags = 0ULL;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM)))
+ *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM;
+
+}
+
+const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = {
+ .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs,
+ .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index b081ae3e8f43..6908f9930f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index f5db181ef489..4576bf008b22 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -861,11 +861,12 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 5b86df0c8536..44e1cd821eec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
- return (smu->adev->flags & AMD_IS_APU) &&
- smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557600;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05550E00;
- default:
- return false;
- }
-}
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -161,9 +143,9 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -192,8 +174,10 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
- MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
// clang-format on
@@ -230,7 +214,11 @@ static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_CO
SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
- SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
};
#define TABLE_PMSTATUSLOG 0
@@ -252,27 +240,13 @@ static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
-struct PPTable_t {
- uint32_t MaxSocketPowerLimit;
- uint32_t MaxGfxclkFrequency;
- uint32_t MinGfxclkFrequency;
- uint32_t FclkFrequencyTable[4];
- uint32_t UclkFrequencyTable[4];
- uint32_t SocclkFrequencyTable[4];
- uint32_t VclkFrequencyTable[4];
- uint32_t DclkFrequencyTable[4];
- uint32_t LclkFrequencyTable[4];
- uint32_t MaxLclkDpmRange;
- uint32_t MinLclkDpmRange;
- uint64_t PublicSerialNumber_AID;
- bool Init;
-};
-
-#define SMUQ10_TO_UINT(x) ((x) >> 10)
-#define SMUQ10_FRAC(x) ((x) & 0x3ff)
-#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
-#define GET_METRIC_FIELD(field, flag) ((flag) ?\
- (metrics_a->field) : (metrics_x->field))
+#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
+ (metrics_v0->field) : (metrics_v2->field))
+#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
+ (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version))
+#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\
+ sizeof(MetricsTableV1_t),\
+ sizeof(MetricsTableV2_t)))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -281,6 +255,233 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu)
+{
+ if ((smu->adev->flags & AMD_IS_APU) &&
+ smu->smc_fw_version <= 0x4556900)
+ return METRICS_VERSION_V1;
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12))
+ return METRICS_VERSION_V2;
+
+ return METRICS_VERSION_V0;
+}
+
+static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps |= BIT_ULL(cap);
+}
+
+static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps &= ~BIT_ULL(cap);
+}
+
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ return !!(dpm_context->caps & BIT_ULL(cap));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver >= 0x05550E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver >= 0x05550B00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver >= 0x5551200)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551800)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+ if (fw_ver >= 0x5551600) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+}
+
+static void smu_v13_0_12_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(PER_INST_METRICS) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x00561900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+
+ if (fw_ver >= 0x00561700)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if (fw_ver >= 0x00561E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (fw_ver >= 0x04560100) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+
+ if (fw_ver > 0x04560900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+
+ if (fw_ver >= 0x04560D00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT));
+
+ if (fw_ver >= 0x04560700) {
+ if (fw_ver >= 0x04560900) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ if (smu->adev->gmc.xgmi.physical_node_id == 0)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS));
+ } else if (!amdgpu_sriov_vf(smu->adev))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ } else {
+ smu_v13_0_12_tables_fini(smu);
+ }
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t fw_ver = smu->smc_fw_version;
+ uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x552F00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+ if (fw_ver < 0x554500)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
+
+ if (adev->flags & AMD_IS_APU) {
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+
+ if (fw_ver >= 0x04556A00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ } else {
+ if (fw_ver >= 0x557600)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver < 0x00556000)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
+ if (fw_ver < 0x556300)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ if (fw_ver < 0x554800)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
+ if (fw_ver >= 0x556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver < 0x00555a00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ if (fw_ver < 0x00555600)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (fw_ver >= 0x00558200)
+ amdgpu_virt_attr_set(&adev->virt.virt_caps,
+ AMDGPU_VIRT_CAP_POWER_LIMIT,
+ AMDGPU_CAP_ATTR_RW);
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ } else {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ }
+ if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
+ ((pgm == 0) && (fw_ver >= 0x00557900)) ||
+ ((pgm == 4) && (fw_ver >= 0x4557000)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 7 && fw_ver >= 0x07551400))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ return smu_v13_0_12_init_caps(smu);
+ case IP_VERSION(13, 0, 14):
+ return smu_v13_0_14_init_caps(smu);
+ default:
+ return smu_v13_0_6_init_caps(smu);
+ }
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+ int r;
+
+ r = smu_v13_0_check_fw_version(smu);
+ /* Initialize caps flags once fw version is fetched */
+ if (!r)
+ smu_v13_0_x_init_caps(smu);
+
+ return r;
+}
+
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
@@ -293,8 +494,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
- /* No need to load P2S tables in IOV mode */
- if (amdgpu_sriov_vf(adev))
+ /* No need to load P2S tables in IOV mode or for smu v13.0.12 */
+ if (amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
@@ -350,14 +552,20 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
+ int gpu_metrics_size = METRICS_TABLE_SIZE;
+ int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
- max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
+ max(gpu_metrics_size,
+ smu_v13_0_12_get_max_metrics_size()),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -365,28 +573,40 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
- sizeof(MetricsTableA_t)), GFP_KERNEL);
- if (!smu_table->metrics_table)
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+ metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
- smu_table->gpu_metrics_table =
- kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!smu_table->gpu_metrics_table) {
- kfree(smu_table->metrics_table);
+ driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
return -ENOMEM;
- }
- smu_table->driver_pptable =
- kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
- if (!smu_table->driver_pptable) {
- kfree(smu_table->metrics_table);
- kfree(smu_table->gpu_metrics_table);
- return -ENOMEM;
+ ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS,
+ sizeof(struct smu_v13_0_6_gpu_metrics), 1);
+ if (ret)
+ return ret;
+
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+ smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9);
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12)) {
+ ret = smu_v13_0_12_tables_init(smu);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
+ return ret;
+ }
}
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
return 0;
}
@@ -413,7 +633,7 @@ static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPstatePolicy,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy,
param, NULL);
if (ret)
@@ -515,6 +735,14 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ smu_v13_0_12_tables_fini(smu);
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
+ return smu_v13_0_fini_smc_tables(smu);
+}
+
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
@@ -528,8 +756,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
- void *metrics_table, bool bypass_cache)
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
@@ -546,7 +774,7 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
smu_table->metrics_time = jiffies;
@@ -583,10 +811,8 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
memset(&pm_metrics->common_header, 0,
sizeof(pm_metrics->common_header));
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6);
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14);
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
pm_metrics->common_header.pmfw_version = pmfw_version;
pm_metrics->common_header.pmmetrics_version = table_version;
pm_metrics->common_header.structure_size =
@@ -595,16 +821,77 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_hdp_invalidate(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
+static void smu_v13_0_6_update_caps(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
+ !pptable->PPT1Max)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT));
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
- int ret, i, retry = 100;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ int ret, i, retry = 100, n;
uint32_t table_version;
+ uint16_t max_speed;
+ uint8_t max_width;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_setup_driver_pptable(smu);
+ if (ret)
+ return ret;
+ goto out;
+ }
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
@@ -614,7 +901,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (GET_METRIC_FIELD(AccumulationCounter, flag))
+ if (GET_METRIC_FIELD(AccumulationCounter, version))
break;
usleep_range(1000, 1100);
@@ -631,33 +918,61 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
+ max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
+ max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0];
+ pptable->PublicSerialNumber_AID =
+ GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ GET_METRIC_FIELD(PublicSerialNumber_AID,
+ version)[i]);
+ }
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ GET_METRIC_FIELD(PublicSerialNumber_XCD,
+ version)[i]);
+ }
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
-
+out:
+ smu_v13_0_6_update_caps(smu);
return 0;
}
@@ -665,51 +980,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- uint32_t clock_limit = 0, param;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ /* Use dpm tables, if data is already fetched */
+ if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
- if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
- if (pptable->Init)
- clock_limit = pptable->MinGfxclkFrequency;
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
- if (pptable->Init)
- clock_limit = pptable->SocclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
- if (pptable->Init)
- clock_limit = pptable->FclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
- if (pptable->Init)
- clock_limit = pptable->VclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
- if (pptable->Init)
- clock_limit = pptable->DclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
- break;
+ return -EINVAL;
}
- if (min)
- *min = clock_limit;
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+ if (min)
+ *min = min_clk;
if (max)
- *max = clock_limit;
+ *max = max_clk;
- return 0;
+ if (min_clk && max_clk)
+ return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
@@ -798,8 +1113,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy not supported in older firmwares */
- if (!(smu->adev->flags & AMD_IS_APU) &&
- (smu->smc_fw_version < 0x00556000)) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
@@ -974,9 +1288,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
+ int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -985,56 +1300,60 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
+
/* For clocks with multiple instances, only report the first one */
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- if (smu->smc_fw_version >= 0x552F00) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
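
The hunk above swaps the old two-way metrics selector (the boolean flag) for a three-way version selector. GET_METRIC_FIELD itself is defined earlier in the file and is not shown in this diff; a minimal sketch of the dispatch it presumably performs, assuming the metrics_v0/metrics_v1/metrics_v2 locals declared above are in scope, is:

    /*
     * Hypothetical sketch only; the driver's real macro in
     * smu_v13_0_6_ppt.c may be implemented differently.
     */
    #define GET_METRIC_FIELD(field, version)                          \
            ((version) == METRICS_VERSION_V0 ? (metrics_v0->field) :  \
             (version) == METRICS_VERSION_V1 ? (metrics_v1->field) :  \
                                               (metrics_v2->field))

The point of the change is that a boolean could only distinguish two table layouts, while the SMU 13.0.12 additions introduce a third.
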
@@ -1099,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
return -EINVAL;
if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
- size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
+ size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
clocks.data[i].clocks_in_khz /
@@ -1134,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int now, size = 0;
+ int now, size = 0, start_offset = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_13_0_dpm_table *single_dpm_table;
@@ -1143,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
dpm_context = smu_dpm->dpm_context;
@@ -1167,8 +1487,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- min_clk = pstate_table->gfxclk_pstate.curr.min;
- max_clk = pstate_table->gfxclk_pstate.curr.max;
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ min_clk = single_dpm_table->min;
+ max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -1198,6 +1519,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
+ return 0;
+
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
@@ -1214,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "mclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "mclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
&now);
@@ -1228,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "socclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "socclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
&now);
@@ -1242,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "fclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "fclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
&now);
@@ -1256,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "vclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "vclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
&now);
@@ -1270,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "dclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "dclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
default:
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
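
The start_offset bookkeeping introduced in these hunks exists because smu_cmn_get_sysfs_buf() may already have advanced size past a pre-filled portion of the buffer, while callers of the print_clk_levels hook expect only the number of bytes this function itself appended. A minimal sketch of the pattern, with a hypothetical prefill value standing in for the real buffer setup:

    #include <linux/sysfs.h>

    /* Sketch of the offset-correction pattern; not the driver's exact code. */
    static ssize_t example_emit_levels(char *buf, int prefill)
    {
            int size = prefill;   /* stand-in for what smu_cmn_get_sysfs_buf() leaves in 'size' */
            int start_offset = size;

            size += sysfs_emit_at(buf, size, "0: 500Mhz\n");
            size += sysfs_emit_at(buf, size, "1: 1200Mhz *\n");

            /* report only the bytes appended by this function */
            return size - start_offset;
    }
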
@@ -1453,6 +1797,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1497,6 +1842,24 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VDDBOARD:
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ *(uint32_t *)data = dpm_context->board_volt;
+ *size = 4;
+ break;
+ } else {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+ ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data);
+ if (ret)
+ return ret;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1528,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
if (current_power_limit)
*current_power_limit = power_limit;
if (default_power_limit)
- *default_power_limit = power_limit;
+ *default_power_limit = pptable->MaxSocketPowerLimit;
if (max_power_limit) {
*max_power_limit = pptable->MaxSocketPowerLimit;
@@ -1543,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+
+ if (limit_type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) should be between min %d max %d\n",
+ limit, pptable->PPT1Min, pptable->PPT1Max);
+ return -EINVAL;
+ }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
+ limit, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
+ return ret;
+ }
+
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
+static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret = 0;
+
+ if (type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = pptable->PPT1Max;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+ break;
+ case SMU_PPT_LIMIT_DEFAULT:
+ *ppt_limit = pptable->PPT1Default;
+ break;
+ case SMU_PPT_LIMIT_MIN:
+ *ppt_limit = pptable->PPT1Min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
+
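
Throughout this patch, raw firmware-version comparisons such as smu->smc_fw_version >= 0x552F00 are replaced by named capability queries via smu_v13_0_6_cap_supported() and the SMU_CAP() macro. The cap storage itself is not part of these hunks; one plausible sketch of such a per-SMU capability bitmap, purely for illustration, is:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Illustrative only; the driver's real cap storage and helpers may differ. */
    struct example_caps {
            DECLARE_BITMAP(bits, SMU_CAP(ALL));
    };

    static void example_cap_set(struct example_caps *c, enum smu_v13_0_6_caps cap)
    {
            set_bit(cap, c->bits);
    }

    static bool example_cap_supported(struct example_caps *c, enum smu_v13_0_6_caps cap)
    {
            return test_bit(cap, c->bits);
    }

Centralizing the version checks behind named caps keeps the per-IP and per-firmware quirks in one place instead of scattering magic version constants through every call site.
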
static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1676,7 +2096,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
- if (smu->smc_fw_version < 0x554800)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1764,7 +2184,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1773,7 +2193,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
break;
}
- return -EINVAL;
+ return -EOPNOTSUPP;
}
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
@@ -1821,9 +2241,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
- if (amdgpu_sriov_vf(adev) &&
- ((smu->smc_fw_version < 0x556600) ||
- (adev->flags & AMD_IS_APU)))
+ if (!smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
@@ -1978,7 +2397,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2027,7 +2446,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+ if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
*feature_mask = 0;
ret = 0;
}
@@ -2040,6 +2459,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
int ret;
uint64_t feature_enabled;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ return smu_v13_0_12_is_dpm_running(smu);
+
ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
if (ret)
@@ -2064,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
memcpy(table->cpu_addr, table_data, table_size);
/* Flush hdp cache */
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
NULL);
@@ -2190,10 +2612,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_6_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2201,27 +2623,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
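
Switching from i2c_add_adapter() to devm_i2c_add_adapter() ties each adapter's lifetime to the amdgpu device, so the manual unwind loop behind the old Out_err label and the deletion loop in the fini path can both be dropped. A hedged sketch of the device-managed registration pattern:

    #include <linux/i2c.h>

    /* Sketch of device-managed adapter registration; details are illustrative. */
    static int example_register_adapter(struct device *dev, struct i2c_adapter *adap)
    {
            int res = devm_i2c_add_adapter(dev, adap);

            if (res)
                    return res;     /* nothing to unwind on failure */

            /*
             * No i2c_del_adapter() in the teardown path: devres removes the
             * adapter automatically when @dev is unbound.
             */
            return 0;
    }
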
@@ -2318,94 +2725,224 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
return pcie_gen_to_speed(speed_level + 1);
}
-static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
+static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
+ void *table)
{
- bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
- int ret = 0, xcc_id, inst, i, j, k, idx;
+ const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
- MetricsTableX_t *metrics_x;
- MetricsTableA_t *metrics_a;
+ int ret, inst, i, j, k, idx;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
- u16 link_width_level;
u32 inst_mask;
+ bool per_inst;
- metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
- if (ret) {
- kfree(metrics_x);
+ if (!table)
+ return sizeof(*xcp_metrics);
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ if (xcp->id == xcp_id)
+ break;
+ }
+ if (i == adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
+ smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1);
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret)
return ret;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
+ metrics_v0);
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(VclkFrequency, version)[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(DclkFrequency, version)[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
+
+ idx++;
}
- metrics_a = (MetricsTableA_t *)metrics_x;
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
+ version)[inst]);
+
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitPptAcc
+ [inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitThmAcc
+ [inst]);
+ xcp_metrics->gfx_low_utilization_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0
+ ->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitTotalAcc
+ [inst]);
+ }
+ idx++;
+ }
+ }
+
+ return sizeof(*xcp_metrics);
+}
+
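
The new get_xcp_metrics() above, and get_gpu_metrics() below, declare the temporary table as MetricsTableV0_t *metrics_v0 __free(kfree) = NULL, which is why the explicit kfree() calls of the old error paths disappear. A minimal sketch of the scope-based cleanup idiom from linux/cleanup.h, with a hypothetical payload:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int example_scoped_alloc(size_t len)
    {
            /*
             * Freed automatically when the pointer goes out of scope,
             * including on every early return below.
             */
            u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            if (len < 8)
                    return -EINVAL;   /* no manual kfree() needed */

            buf[0] = 0xaa;
            return 0;
    }
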
+static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
+ u16 link_width_level;
+ u8 num_jpeg_rings;
+ bool per_inst;
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret)
+ return ret;
+
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0,
+ gpu_metrics);
+ goto fill;
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
+
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag);
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency,
+ version)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency,
+ version)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
/* Total accumulated cycle counter */
- gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag);
+ gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version);
/* Accumulated throttler residencies */
- gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag);
- gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag);
- gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag);
- gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag);
- gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag);
+ gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version);
+ gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version);
+ gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version);
+ gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version);
+ gpu_metrics->hbm_thm_residency_acc =
+ GET_METRIC_FIELD(HbmThmResidencyAcc, version);
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak,
+ version) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
/* Check smu version: PCIe link speed and width are reported from the pmfw metrics
 * table for both the PF and one VF on smu version 85.99.0 or higher; otherwise they
 * are reported only for the PF from registers
 */
- if (smu->smc_fw_version >= 0x556300) {
- gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
+ gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version);
gpu_metrics->pcie_link_speed =
- pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
+ pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version));
} else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
@@ -2418,96 +2955,99 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
}
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics_x->PCIeL0ToRecoveryCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version);
gpu_metrics->pcie_replay_count_acc =
- metrics_x->PCIenReplayAAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version);
gpu_metrics->pcie_replay_rover_count_acc =
- metrics_x->PCIenReplayARolloverCountAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version);
gpu_metrics->pcie_nak_sent_count_acc =
- metrics_x->PCIeNAKSentCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version);
gpu_metrics->pcie_nak_rcvd_count_acc =
- metrics_x->PCIeNAKReceivedCountAcc;
- if (smu_v13_0_6_is_other_end_count_available(smu))
+ GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
- metrics_x->PCIeOtherEndRecoveryAcc;
+ GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version);
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]);
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
+ gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
- }
-
- gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
- apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
- smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 6)) &&
- (smu->smc_fw_version >= 0x556F00);
- smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 14)) &&
- (smu->smc_fw_version >= 0x05550B00);
-
- per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
-
- for_each_xcp(adev->xcp_mgr, xcp, i) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- /* Both JPEG and VCN has same instances */
- inst = GET_INST(VCN, k);
-
- for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * adev->jpeg.num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag)
- [(inst * adev->jpeg.num_jpeg_rings) + j]);
- }
- gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]);
- idx++;
-
- }
-
- if (per_inst) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
- idx++;
+ gpu_metrics->xgmi_link_status[j] = ret;
+ }
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < num_jpeg_rings; ++j)
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_busy[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+ }
+
+ if (per_inst) {
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->gfx_below_host_limit_ppt_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitPptAcc
+ [inst]);
+ gpu_metrics->gfx_below_host_limit_thm_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitThmAcc
+ [inst]);
+ gpu_metrics->gfx_low_utilization_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0
+ ->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitTotalAcc
+ [inst]);
}
}
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag));
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag));
+ gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version);
+ gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version);
- gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag);
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
- *table = (void *)gpu_metrics;
- kfree(metrics_x);
+fill:
+ *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer;
return sizeof(*gpu_metrics);
}
@@ -2600,7 +3140,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/* The GetCtfLimit message is only supported for smu version 85.69 or higher */
- if (smu->smc_fw_version < 0x554500)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
return 0;
/* Get SOC Max operating temperature */
@@ -2674,14 +3214,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+ return ret;
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+ struct amdgpu_device *adev = smu->adev;
+ int var = (adev->pdev->device & 0xF);
+
+ if (var == 0x0 || var == 0x1 || var == 0x3)
+ return true;
+
+ return false;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2702,11 +3257,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
- if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2718,19 +3272,35 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @smu: smu_context pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * It returns false if the capability is not supported.
+ */
+static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu)
+{
+ bool ret = true;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) {
+ dev_info(smu->adev->dev,
+ "SDMA reset capability is not supported\n");
+ ret = false;
+ }
+
+ return ret;
+}
+
static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /* the message is only valid on SMU 13.0.6 with pmfw 85.121.00 and above */
- if ((adev->flags & AMD_IS_APU) ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) ||
- smu->smc_fw_version < 0x00557900)
- return 0;
+ if (!smu_v13_0_6_reset_sdma_is_supported(smu))
+ return -EOPNOTSUPP;
ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ResetSDMA, inst_mask, NULL);
+ SMU_MSG_ResetSDMA, inst_mask, NULL);
if (ret)
dev_err(smu->adev->dev,
"failed to send ResetSDMA event with mask 0x%x\n",
@@ -2739,6 +3309,56 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
+}
+
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
+static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg)
+{
+ int ret;
+
+ switch (msg) {
+ case SMU_MSG_QueryValidMcaCount:
+ case SMU_MSG_QueryValidMcaCeCount:
+ case SMU_MSG_McaBankDumpDW:
+ case SMU_MSG_McaBankCeDumpDW:
+ case SMU_MSG_ClearMcaOnRead:
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
+ break;
+ default:
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
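
The new smu_v13_0_6_ras_send_msg() forwards only the five MCA query/dump messages and rejects anything else with -EPERM. A hypothetical caller, shown only to illustrate how the whitelist behaves (it is not part of this patch):

    /* Hypothetical caller inside this file, for illustration only. */
    static int example_query_mca_count(struct smu_context *smu, uint32_t *count)
    {
            int ret = smu_v13_0_6_ras_send_msg(smu, SMU_MSG_QueryValidMcaCount, 0, count);

            if (ret == -EPERM)
                    dev_warn(smu->adev->dev, "message rejected by the RAS whitelist\n");
            return ret;
    }
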
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ if (smu_v13_0_6_is_link_reset_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
+
+ if (smu_v13_0_6_reset_sdma_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+
+ if (smu_v13_0_6_reset_vcn_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+
+ return 0;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3049,7 +3669,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
if (instlo != 0x03b30400)
return false;
- if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
@@ -3335,9 +3955,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3354,6 +3975,35 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.parse_error_code = aca_smu_parse_error_code,
};
+static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
+{
+ smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
+}
+
+static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv)
+{
+ if (!ras_smu_drv)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return -EOPNOTSUPP;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM));
+
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ *ras_smu_drv = &smu_v13_0_12_ras_smu_drv;
+ break;
+ default:
+ *ras_smu_drv = NULL;
+ break;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -3370,12 +4020,12 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
- .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
/* pptable related */
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
@@ -3385,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_limit = smu_v13_0_6_set_power_limit,
+ .get_ppt_limit = smu_v13_0_6_get_ppt_limit,
.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
.register_irq_handler = smu_v13_0_6_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -3398,29 +4049,38 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
+ .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
+ .post_init = smu_v13_0_6_post_init,
+ .ras_send_msg = smu_v13_0_6_ras_send_msg,
+ .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
- smu->message_map = smu_v13_0_6_message_map;
+ smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_message_map : smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
- smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
- smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
+
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index f0fa42a645c0..6cbdd7c5ded9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,7 +26,240 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
+
+typedef enum {
+/*0*/ METRICS_VERSION_V0 = 0,
+/*1*/ METRICS_VERSION_V1 = 1,
+/*2*/ METRICS_VERSION_V2 = 2,
+
+/*3*/ NUM_METRICS = 3
+} METRICS_LIST_e;
+
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ uint64_t PublicSerialNumber_AID;
+ uint32_t MaxNodePowerLimit;
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
+ bool Init;
+};
+
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(VCN_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(PLDM_VERSION),
+ SMU_CAP(TEMP_METRICS),
+ SMU_CAP(NPM_METRICS),
+ SMU_CAP(RAS_EEPROM),
+ SMU_CAP(FAST_PPT),
+ SMU_CAP(ALL),
+};
+
+#define SMU_13_0_6_NUM_XGMI_LINKS 8
+#define SMU_13_0_6_MAX_GFX_CLKS 8
+#define SMU_13_0_6_MAX_CLKS 4
+#define SMU_13_0_6_MAX_XCC 8
+#define SMU_13_0_6_MAX_VCN 4
+#define SMU_13_0_6_MAX_JPEG 40
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache);
+
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+size_t smu_v13_0_12_get_system_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
+ struct amdgpu_xcp *xcp, void *table,
+ void *smu_metrics);
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+void smu_v13_0_12_tables_fini(struct smu_context *smu);
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
+extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v13.0.6 GPU metrics */
+#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hotspot); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mem); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_vrsoc); \
+ SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
+ SMU_MTYPE(U16), curr_socket_power); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_gfx_activity); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_umc_activity); \
+ SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), mem_max_bandwidth); \
+ SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), energy_accumulator); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
+ SMU_MTYPE(U64), system_clock_counter); \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), prochot_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), ppt_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), socket_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), vr_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), hbm_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), gfxclk_lock_status); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), pcie_link_width); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
+ SMU_MTYPE(U16), pcie_link_speed); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_width); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
+ SMU_MTYPE(U16), xgmi_link_speed); \
+ SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), mem_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U64), pcie_bandwidth_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), pcie_bandwidth_inst); \
+ SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_sent_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \
+ SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_read_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_write_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_status, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
+ SMU_MTYPE(U64), firmware_timestamp); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
+ SMU_MUNIT(NONE), SMU_MTYPE(U32), \
+ pcie_lc_perf_other_end_recovery); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics);
+
+#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics,
+ SMU_13_0_6_PARTITION_METRICS_FIELDS);
+
+#endif /* SWSMU_CODE_LAYER_L2 */
#endif
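
The header builds the metrics structures from a single field list: SMU_13_0_6_METRICS_FIELDS is an X-macro that takes two emitter callbacks, and DECLARE_SMU_METRICS_CLASS() presumably expands it once to produce the struct members and again elsewhere to produce per-field metadata. A stripped-down sketch of the idiom, using hypothetical macro names:

    #include <linux/types.h>

    /* Toy X-macro sketch; the real SMU_SCALAR/SMU_ARRAY helpers carry more metadata. */
    #define EXAMPLE_FIELDS(SCALAR, ARRAY)          \
            SCALAR(u16, temperature_hotspot);      \
            ARRAY(u16, current_socclk, 4);

    #define EXAMPLE_EMIT_SCALAR(type, name)        type name
    #define EXAMPLE_EMIT_ARRAY(type, name, len)    type name[len]

    struct example_metrics {
            EXAMPLE_FIELDS(EXAMPLE_EMIT_SCALAR, EXAMPLE_EMIT_ARRAY)
    };

Keeping one field list means the struct layout and whatever attribute table is generated from it cannot drift apart.
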
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index aabb94796005..a3fc35b9011e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -807,6 +787,10 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
else
*value = metrics->AverageMemclkFrequencyPreDs;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->Vcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_VCLK:
*value = metrics->AverageVclk0Frequency;
break;
@@ -951,6 +935,12 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_7_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_7_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -1194,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1533,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
@@ -2729,6 +2720,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ int link_level;
+ uint32_t smu_pcie_arg;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
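
SMU_MSG_OverridePcieParameters packs the link level index, PCIe gen and lane code into a single 32-bit argument (level << 16 | gen << 8 | lanes), as the loops above show. A tiny illustrative encoder:

    /* Illustrative encoder for the OverridePcieParameters message argument. */
    static inline uint32_t example_pcie_override_arg(uint32_t level, uint32_t gen,
                                                     uint32_t lanes)
    {
            return (level << 16) | (gen << 8) | lanes;
    }

    /* e.g. example_pcie_override_arg(2, 3, 6); all three values are purely illustrative */
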
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2758,7 +2832,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -2797,6 +2871,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
@@ -2810,5 +2885,4 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 73b4506ef5a8..5d7e671fa3c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
uint32_t clk_limit = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int yellow_carp_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9b2f4fe1578b..f9b0938c57ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -245,6 +250,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
@@ -769,6 +775,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -948,6 +955,14 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish different
+ * events for SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
+ uint32_t high;
+
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
@@ -962,6 +977,50 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
src_id);
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+
+ switch (ctxid) {
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
+ high = smu->thermal_range.software_shutdown_temp +
+ smu->thermal_range.software_shutdown_temp_offset;
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ high);
+ dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
+ high,
+ smu->thermal_range.software_shutdown_temp_offset);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ smu->thermal_range.software_shutdown_temp);
+ dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
+ }
+ }
}
return 0;
@@ -1895,16 +1954,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
NULL);
}
-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
- int ret = 0;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
- ret = smu_v14_0_allow_ih_interrupt(smu);
-
- return ret;
-}
-
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
@@ -1916,7 +1965,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
if (ret)
return ret;
- return smu_v14_0_process_pending_interrupt(smu);
+ return smu_v14_0_allow_ih_interrupt(smu);
}
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..b1bd946d8e30 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, ret = 0, size = 0;
+ int i, idx, ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1202,16 +1203,18 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ u32 min,
+ u32 max,
+ bool __always_unused automatic)
{
- enum smu_message_type msg_set_min, msg_set_max;
- int ret = 0;
+ enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+ enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+ int ret = -EINVAL;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
return -EINVAL;
@@ -1240,16 +1243,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
+ case SMU_ISPICLK:
+ msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+ break;
+ case SMU_ISPXCLK:
+ msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+ break;
default:
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
- if (ret)
- return ret;
+ if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+ if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
- return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ return ret;
}
static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
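
With the rework above, smu_v14_0_0_set_soft_freq_limited_range() only sends a message for a bound when the caller passed a non-zero value and the clock actually defines a message for that bound; the new ISP clocks, for example, only map a hard-minimum message. A self-contained sketch of that dispatch rule (the -EINVAL fallback mirrors the hunk, all names and values are illustrative):

#include <stdio.h>

enum msg { MSG_NONE = -1, MSG_SET_HARD_MIN_ISPICLK, MSG_SET_SOFT_MAX };

/* Stand-in for smu_cmn_send_smc_msg_with_param(). */
static int send_msg(enum msg m, unsigned int param)
{
	printf("send msg %d param %u\n", m, param);
	return 0;
}

/* A bound is programmed only when it is non-zero and a message exists
 * for it; if nothing is sent, the -EINVAL initial value is returned. */
static int set_limited_range(enum msg set_min, enum msg set_max,
			     unsigned int min, unsigned int max)
{
	int ret = -22; /* -EINVAL */

	if (min && set_min != MSG_NONE)
		ret = send_msg(set_min, min);
	if (max && set_max != MSG_NONE)
		ret = send_msg(set_max, max);

	return ret;
}

int main(void)
{
	/* ISPICLK-style call: only a hard minimum, no soft maximum. */
	return set_limited_range(MSG_SET_HARD_MIN_ISPICLK, MSG_NONE, 600, 0);
}
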
@@ -1278,7 +1288,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
if (ret)
break;
- ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
break;
default:
ret = -EINVAL;
@@ -1426,7 +1436,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1438,7 +1449,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ false);
if (ret)
return ret;
}
@@ -1447,7 +1459,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1456,7 +1469,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1465,7 +1479,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK1,
vclk1_min,
- vclk1_max);
+ vclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1474,7 +1489,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
@@ -1483,7 +1499,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK1,
dclk1_min,
- dclk1_max);
+ dclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1533,6 +1550,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+ ISP_ALL_TILES_MASK, NULL);
+}
+
static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -1662,6 +1687,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1695,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
.set_mall_enable = smu_v14_0_common_set_mall_enable,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..2cea688c604f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -79,6 +79,7 @@
#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
@@ -501,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_14_0_dpm_table *dpm_table;
- struct smu_14_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -618,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
-
- if (link_level == 0)
- link_level++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -756,6 +734,10 @@ static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_MEMACTIVITY:
*value = metrics->AverageUclkActivity;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->AverageVcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->AverageSocketPower << 8;
break;
@@ -882,6 +864,12 @@ static int smu_v14_0_2_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v14_0_2_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -1042,6 +1030,10 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
od_max_setting = overdrive_upperlimits->FanMinimumPwm;
break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1064,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1193,16 +1186,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1327,6 +1313,24 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
min_value, max_value);
break;
+ case SMU_OD_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmEnable);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
+ min_value, max_value);
+ break;
+
case SMU_OD_RANGE:
if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
@@ -1337,12 +1341,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1375,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
@@ -1465,10 +1465,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
- int num_of_levels = pcie_table->num_of_link_levels;
+ int num_of_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+ num_of_levels = pcie_table->num_of_link_levels;
if (!num_of_levels)
return 0;
@@ -1483,30 +1504,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
- }
-
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
- if (ret)
- return ret;
+ if (ret)
+ break;
+ }
+ }
}
- return 0;
+ return ret;
}
static const struct smu_temperature_range smu14_thermal_policy[] = {
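
For reference, the smu_pcie_arg word built in this hunk packs the link level index into bits 31:16, the gen-speed code into bits 15:8 and the lane code into bits 7:0 before it is sent with SMU_MSG_OverridePcieParameters. A tiny sketch of that packing (the sample values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same layout the hunk uses for smu_pcie_arg:
 * [31:16] link level index, [15:8] gen-speed code, [7:0] lane code. */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
	return (level << 16) | (gen << 8) | lanes;
}

int main(void)
{
	/* Level 1, gen code 3, lane code 6 -> 0x00010306. */
	printf("0x%08x\n", pack_pcie_arg(1, 3, 6));
	return 0;
}
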
@@ -1627,6 +1658,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output which is in percent to pwm(255) based */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -1634,9 +1698,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
@@ -1649,11 +1715,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit)
- *max_power_limit = msg_limit;
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ }
+ }
+
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- if (min_power_limit)
- *min_power_limit = 0;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 + od_percent_lower);
+ *min_power_limit /= 100;
+ }
return 0;
}
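
The new limit computation scales the firmware message limit by the overdrive upper percentage and the default limit by the (typically negative) lower percentage. A worked example with made-up numbers, not values from any real SKU table:

#include <stdio.h>

int main(void)
{
	/* Illustrative only: 220 W message limit, 200 W default limit,
	 * overdrive range +10% / -10%. */
	unsigned int msg_limit = 220, power_limit = 200;
	int od_percent_upper = 10, od_percent_lower = -10;
	unsigned int max_power_limit, min_power_limit;

	max_power_limit = msg_limit * (100 + od_percent_upper) / 100;
	min_power_limit = power_limit * (100 + od_percent_lower) / 100;

	/* Prints "max 242 W, min 180 W". */
	printf("max %u W, min %u W\n", max_power_limit, min_power_limit);
	return 0;
}
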
@@ -2004,10 +2088,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v14_0_2_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2017,27 +2101,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -2238,7 +2307,9 @@ static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
- OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
}
static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
@@ -2317,6 +2388,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
user_od_table_bak.OverDriveTable.FanTargetTemperature;
user_od_table->OverDriveTable.FanMinimumPwm =
user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ user_od_table->OverDriveTable.FanZeroRpmEnable =
+ user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
}
smu_v14_0_2_set_supported_od_feature_mask(smu);
@@ -2364,6 +2437,11 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ od_table->OverDriveTable.FanZeroRpmEnable =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
case PP_OD_EDIT_ACOUSTIC_LIMIT:
od_table->OverDriveTable.AcousticLimitRpmThreshold =
boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
@@ -2417,36 +2495,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2658,6 +2724,27 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmEnable = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size == 1) {
ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
@@ -2804,6 +2891,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 9f55207ea9bc..4040ff926544 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -163,14 +164,27 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
msg_index, param, message);
break;
case SMU_RESP_BUSY_OTHER:
- dev_err_ratelimited(adev->dev,
- "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
- msg_index, param, message);
+ /* It is normal for SMU_MSG_GetBadPageCount to return busy
+		 * so don't print an error in this case.
+ */
+ if (msg != SMU_MSG_GetBadPageCount)
+ dev_err_ratelimited(adev->dev,
+ "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
+ msg_index, param, message);
break;
case SMU_RESP_DEBUG_END:
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
@@ -246,11 +260,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
uint32_t flags, resp;
- bool fed_status;
+ bool fed_status, pri;
flags = __smu_cmn_get_msg_flags(smu, msg);
*poll = true;
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
/* When there is RAS fatal error, FW won't process non-RAS priority
* messages. Don't allow any messages other than RAS priority messages.
*/
@@ -262,15 +277,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
smu_get_message_name(smu, msg));
return -EACCES;
}
+ }
+ if (pri || fed_status) {
/* FW will ignore non-priority messages when a RAS fatal error
- * is detected. Hence it is possible that a previous message
- * wouldn't have got response. Allow to continue without polling
- * for response status for priority messages.
+ * or reset condition is detected. Hence it is possible that a
+ * previous message wouldn't have got response. Allow to
+ * continue without polling for response status for priority
+ * messages.
*/
resp = RREG32(smu->resp_reg);
dev_dbg(adev->dev,
- "Sending RAS priority message %s response status: %x",
+ "Sending priority message %s response status: %x",
smu_get_message_name(smu, msg), resp);
if (resp == 0)
*poll = false;
@@ -459,8 +477,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
}
if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
- dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
- readval: 0x%08x\n",
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
smu_get_message_name(smu, msg), index, param, reg, *read_arg);
} else {
dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
@@ -956,7 +973,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
@@ -967,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu,
* Flush hdp cache: to guard the content seen by
	 * GPU is consistent with CPU.
*/
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
}
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
@@ -979,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu,
return ret;
if (!drv2smu) {
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table_data, table->cpu_addr, table_size);
}
@@ -1052,70 +1069,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu)
false);
}
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
-{
- struct metrics_table_header *header = (struct metrics_table_header *)table;
- uint16_t structure_size;
-
-#define METRICS_VERSION(a, b) ((a << 16) | b)
-
- switch (METRICS_VERSION(frev, crev)) {
- case METRICS_VERSION(1, 0):
- structure_size = sizeof(struct gpu_metrics_v1_0);
- break;
- case METRICS_VERSION(1, 1):
- structure_size = sizeof(struct gpu_metrics_v1_1);
- break;
- case METRICS_VERSION(1, 2):
- structure_size = sizeof(struct gpu_metrics_v1_2);
- break;
- case METRICS_VERSION(1, 3):
- structure_size = sizeof(struct gpu_metrics_v1_3);
- break;
- case METRICS_VERSION(1, 4):
- structure_size = sizeof(struct gpu_metrics_v1_4);
- break;
- case METRICS_VERSION(1, 5):
- structure_size = sizeof(struct gpu_metrics_v1_5);
- break;
- case METRICS_VERSION(1, 6):
- structure_size = sizeof(struct gpu_metrics_v1_6);
- break;
- case METRICS_VERSION(1, 7):
- structure_size = sizeof(struct gpu_metrics_v1_7);
- break;
- case METRICS_VERSION(2, 0):
- structure_size = sizeof(struct gpu_metrics_v2_0);
- break;
- case METRICS_VERSION(2, 1):
- structure_size = sizeof(struct gpu_metrics_v2_1);
- break;
- case METRICS_VERSION(2, 2):
- structure_size = sizeof(struct gpu_metrics_v2_2);
- break;
- case METRICS_VERSION(2, 3):
- structure_size = sizeof(struct gpu_metrics_v2_3);
- break;
- case METRICS_VERSION(2, 4):
- structure_size = sizeof(struct gpu_metrics_v2_4);
- break;
- case METRICS_VERSION(3, 0):
- structure_size = sizeof(struct gpu_metrics_v3_0);
- break;
- default:
- return;
- }
-
-#undef METRICS_VERSION
-
- memset(header, 0xFF, structure_size);
-
- header->format_revision = frev;
- header->content_revision = crev;
- header->structure_size = structure_size;
-
-}
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a020277dec3e..8d7c4814c68f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,59 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF
+
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+ struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@@ -125,8 +178,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
int smu_cmn_get_combo_pptable(struct smu_context *smu);
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
@@ -151,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
u32 workload_mask,
u32 *backend_workload_mask);
+/*SMU gpu metrics */
+
+/* Attribute ID mapping */
+#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X
+/* Type ID mapping */
+#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X
+/* Unit ID mapping */
+#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X
+
+/* Map TYPEID to C type */
+#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID
+
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64
+
+/* struct members */
+#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME
+
+#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME[SIZE]
+
+/* Init functions for scalar/array fields - init to 0xFFs */
+#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \
+ obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \
+ count++; \
+ } while (0)
+
+#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \
+ memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \
+ count++; \
+ } while (0)
+
+/* Declare Metrics Class and Template object */
+#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \
+ struct __packed CLASSNAME { \
+ struct metrics_table_header header; \
+ int attr_count; \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \
+ }; \
+ static inline void CLASSNAME##_init(struct CLASSNAME *obj, \
+ uint8_t frev, uint8_t crev) \
+ { \
+ int count = 0; \
+ memset(obj, 0xFF, sizeof(*obj)); \
+ obj->header.format_revision = frev; \
+ obj->header.content_revision = crev; \
+ obj->header.structure_size = sizeof(*obj); \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \
+ SMU_METRICS_INIT_ARRAY) \
+ obj->attr_count = count; \
+ }
+
#endif
#endif
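
smu_cmn_init_soft_gpu_metrics() is now a macro that derives the metrics structure from the (frev, crev) pair at compile time and typechecks the pointer it is given. A hedged sketch of how a ppt backend would call it for a v2.2 table (the surrounding function is illustrative; only the macro call and the struct names come from the tree):

static ssize_t example_get_gpu_metrics(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;

	/* Poisons the body with 0xFF and fills in format 2 / content 2
	 * and the structure size, as the removed helper used to do. */
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	/* ... populate the individual metrics fields here ... */

	*table = (void *)gpu_metrics;
	return sizeof(struct gpu_metrics_v2_2);
}

The DECLARE_SMU_METRICS_CLASS() helper below follows the same idea for the newer attribute-tagged tables: the caller supplies an X-macro field list and gets both the packed struct and a generated <name>_init() that stamps the header and per-field attribute encodings.
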
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index c09ecf1a68a0..34f6b4b1c3ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -100,6 +100,7 @@
#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
+#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile
new file mode 100644
index 000000000000..bbdaba811d34
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+ifeq ($(AMD_GPU_RAS_MGR),)
+ AMD_GPU_RAS_MGR := ras_mgr
+endif
+
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR)
+
+RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore
+
+AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS)))
+
+include $(AMD_RAS)
+
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
new file mode 100644
index 000000000000..5e5a2cfa4068
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
@@ -0,0 +1,33 @@
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+RAS_MGR_FILES = amdgpu_ras_sys.o \
+ amdgpu_ras_mgr.o \
+ amdgpu_ras_eeprom_i2c.o \
+ amdgpu_ras_mp1_v13_0.o \
+ amdgpu_ras_cmd.o \
+ amdgpu_ras_process.o \
+ amdgpu_ras_nbio_v7_9.o
+
+
+RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_MGR)
+
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
new file mode 100644
index 000000000000..78419b7f7729
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_mgr.h"
+
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+#define AMDGPU_RAS_TYPE_RASCORE 0x1
+#define AMDGPU_RAS_TYPE_AMDGPU 0x2
+#define AMDGPU_RAS_TYPE_VF 0x3
+
+static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ RAS_DEV_WARN(adev, "Failed to disallow df cstate");
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to disallow XGMI power down");
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ RAS_DEV_WARN(adev, "Failed to allow df cstate");
+ }
+
+ return 0;
+}
+
+static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core,
+ uint64_t addr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
+
+static int amdgpu_ras_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (req->block_id == RAS_BLOCK_ID__UMC) {
+ if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ req->address);
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ if ((req->address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.",
+ req->address);
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+ }
+
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ req->block_id != RAS_BLOCK_ID__GFX) {
+ req->address = local_addr_to_xgmi_global_addr(ras_core, req->address);
+ }
+ }
+
+ amdgpu_ras_trigger_error_prepare(ras_core, req);
+ ret = rascore_handle_cmd(ras_core, cmd, data);
+ amdgpu_ras_trigger_error_end(ras_core, req);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+
+ return ret;
+}
+
+static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_dev_handle *input_data =
+ (struct ras_cmd_dev_handle *)cmd->input_buff_raw;
+ struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges =
+ (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw;
+ struct amdgpu_mem_partition_info *mem_ranges;
+ uint32_t i = 0;
+
+ if (cmd->input_size != sizeof(*input_data))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ mem_ranges = adev->gmc.mem_partitions;
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT;
+ ranges->range[i].size = mem_ranges[i].size;
+ ranges->range[i].idx = i;
+ }
+
+ ranges->num_ranges = adev->gmc.num_mem_partitions;
+
+ ranges->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_translate_fb_address(struct ras_core_context *ras_core,
+ enum ras_fb_addr_type src_type,
+ enum ras_fb_addr_type dest_type,
+ union ras_translate_fb_address *src_addr,
+ union ras_translate_fb_address *dest_addr)
+{
+ uint64_t soc_phy_addr;
+ int ret = RAS_CMD__SUCCESS;
+
+ /* Does not need to be queued as event as this is a SW translation */
+ switch (src_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ soc_phy_addr = src_addr->soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_bank_to_soc_pa(ras_core,
+ src_addr->bank_addr, &soc_phy_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ switch (dest_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ dest_addr->soc_phy_addr = soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_soc_pa_to_bank(ras_core,
+ soc_phy_addr, &dest_addr->bank_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_translate_fb_address_req *req_buff =
+ (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw;
+ struct ras_cmd_translate_fb_address_rsp *rsp_buff =
+ (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) ||
+ (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type,
+ req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ rsp_buff->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error},
+ {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges},
+ {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address},
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i, res;
+
+ for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) {
+ if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &amdgpu_ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (ras_cmd)
+ res = ras_cmd->func(ras_core, cmd, NULL);
+ else
+ res = RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return res;
+}
+
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd)
+{
+ struct ras_core_context *cmd_core = ras_core;
+ int timeout = 60;
+ int res;
+
+ cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD;
+ cmd->output_size = 0;
+
+ if (!ras_core_is_enabled(cmd_core))
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+
+ while (ras_core_gpu_in_reset(cmd_core)) {
+ msleep(1000);
+ if (!timeout--)
+ return RAS_CMD__ERROR_TIMEOUT;
+ }
+
+ res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL);
+ if (res == RAS_CMD__ERROR_UKNOWN_CMD)
+ res = rascore_handle_cmd(cmd_core, cmd, NULL);
+
+ cmd->cmd_res = res;
+
+ if (cmd->output_size > cmd->output_buf_size) {
+ RAS_DEV_ERR(cmd_core->dev,
+ "Output size 0x%x exceeds output buffer size 0x%x!\n",
+ cmd->output_size, cmd->output_buf_size);
+ return RAS_CMD__SUCCESS_EXEED_BUFFER;
+ }
+
+ return RAS_CMD__SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
new file mode 100644
index 000000000000..5973b156cc85
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_CMD_H__
+#define __AMDGPU_RAS_CMD_H__
+#include "ras.h"
+
+enum amdgpu_ras_cmd_id {
+ RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START,
+ RAS_CMD__TRANSLATE_MEMORY_FD,
+ RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END,
+};
+
+struct ras_cmd_translate_memory_fd_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t type;
+ uint32_t fd;
+ uint64_t address;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_translate_memory_fd_rsp {
+ uint32_t version;
+ uint32_t padding;
+ uint64_t start;
+ uint64_t size;
+ uint32_t reserved[2];
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
new file mode 100644
index 000000000000..3ed3ff42b7e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "ras_eeprom.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
+#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control))
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+static int ras_eeprom_i2c_config(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ u8 i2c_addr;
+
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+ return 0;
+ }
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return 0;
+ default:
+ return -ENODATA;
+ }
+ return -ENODATA;
+}
+
+static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter;
+ u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .flags = 0,
+ .len = EEPROM_OFFSET_SIZE,
+ .buf = eeprom_offset_buf,
+ },
+ {
+ .flags = read ? I2C_M_RD : 0,
+ },
+ };
+ const u8 *p = eeprom_buf;
+ int r;
+ u16 len;
+
+ for (r = 0; buf_size > 0;
+ buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+ /* Set the EEPROM address we want to write to/read from.
+ */
+ msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+ msgs[1].addr = msgs[0].addr;
+ msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+ msgs[0].buf[1] = eeprom_addr & 0xff;
+
+ if (!read) {
+ /* Write the maximum amount of data, without
+ * crossing the device's page boundary, as per
+ * its spec. Partial page writes are allowed,
+ * starting at any location within the page,
+ * so long as the page boundary isn't crossed
+ * over (actually the page pointer rolls
+ * over).
+ *
+ * As per the AT24CM02 EEPROM spec, after
+ * writing into a page, the I2C driver should
+ * terminate the transfer, i.e. in
+ * "i2c_transfer()" below, with a STOP
+ * condition, so that the self-timed write
+ * cycle begins. This is implied for the
+ * "i2c_transfer()" abstraction.
+ */
+ len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK),
+ buf_size);
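+ /* A quick example of the chunking above: writing 300 bytes
+ * starting at EEPROM address 0x1F0 produces chunks of 16 bytes
+ * (up to the 256-byte page boundary), then 256, then 28 bytes.
+ */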
+ } else {
+ /* Reading from the EEPROM has no limitation
+ * on the number of bytes read from the EEPROM
+ * device--they are simply sequenced out.
+ * Keep in mind that i2c_msg.len is u16 type.
+ */
+ len = min(U16_MAX, buf_size);
+ }
+ msgs[1].len = len;
+ msgs[1].buf = eeprom_buf;
+
+ /* This constitutes a START-STOP transaction.
+ */
+ r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+ if (r != ARRAY_SIZE(msgs))
+ break;
+
+ if (!read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+ }
+
+ return r < 0 ? r : eeprom_buf - p;
+}
+
+const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = {
+ .eeprom_i2c_xfer = ras_eeprom_i2c_xfer,
+ .update_eeprom_i2c_config = ras_eeprom_i2c_config,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
new file mode 100644
index 000000000000..3b5878605411
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_EEPROM_I2C_H__
+#define __AMDGPU_RAS_EEPROM_I2C_H__
+#include "ras.h"
+
+extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
new file mode 100644
index 000000000000..afe8135b6258
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_process.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+
+/* Typical ECC bad page rate is 1 bad page per 100 MB of VRAM */
+#define TYPICAL_ECC_BAD_PAGE_RATE (100ULL * SZ_1M)
+
+#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4)
+
+/* Reserve 8 physical DRAM rows for possible retirement.
+ * In the worst case, this loses 8 * 2 MB of memory in the VRAM domain.
+ */
+#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
+
+static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr)
+{
+ struct ras_event_state *event_state;
+ int i;
+
+ memset(mgr, 0, sizeof(*mgr));
+ atomic64_set(&mgr->seqno, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
+ event_state = &mgr->event_state[i];
+ event_state->last_seqno = RAS_EVENT_INVALID_ID;
+ atomic64_set(&event_state->count, 0);
+ }
+}
+
+static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_reset_in_recovery(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_mgr_init_event_mgr(event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_aca_config *aca_cfg = &config->aca_cfg;
+
+ aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE;
+ aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET;
+ aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg;
+
+ eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func;
+ eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus;
+ if (eeprom_cfg->eeprom_i2c_adapter) {
+ const struct i2c_adapter_quirks *quirks =
+ ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks;
+
+ if (quirks) {
+ eeprom_cfg->max_i2c_read_len = quirks->max_read_len;
+ eeprom_cfg->max_i2c_write_len = quirks->max_write_len;
+ }
+ }
+
+ /*
+ * amdgpu_bad_page_threshold is used to configure
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to the default value.
+ *     Driver will issue a warning message when the threshold is reached
+ *     and continue runtime services.
+ * 0: Disable bad page retirement.
+ *    Driver will not retire bad pages,
+ *    which is intended for debugging purposes.
+ * -2: Threshold is determined by a formula
+ *     that assumes 1 bad page per 100 MB of local memory.
+ *     Driver will continue runtime services when the threshold is reached.
+ * 0 < threshold < max number of bad page records in EEPROM:
+ *     A user-defined threshold is set.
+ *     Driver will halt runtime services when this custom threshold is reached.
+ */
+ if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ div64_u64(adev->gmc.mc_vram_size, TYPICAL_ECC_BAD_PAGE_RATE);
+ else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT);
+ else
+ eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold;
+
+ eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold;
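+ /* Worked example for the two formulas above (values are illustrative):
+ * with 64 GB of VRAM, div64_u64(64 GB, 100 MB) yields 655 records, while
+ * COUNT_BAD_PAGE_THRESHOLD(16 MB) = ((16 MB >> 21) << 4) = 128 records.
+ */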
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_mp1_config *mp1_cfg = &config->mp1_cfg;
+ int ret = 0;
+
+ switch (config->mp1_ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "Unsupported MP1 IP version (0x%x) for RAS!\n",
+ config->mp1_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_nbio_config *nbio_cfg = &config->nbio_cfg;
+ int ret = 0;
+
+ switch (config->nbio_ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "Unsupported NBIO IP version (0x%x) for RAS!\n",
+ config->nbio_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ta_context *context = &adev->psp.ras_context.context;
+
+ status->initialized = context->initialized;
+ status->session_id = context->session_id;
+ status->psp_cmd_mutex = &adev->psp.mutex;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t nps_mode;
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_ta_param->poison_mode_en = 1;
+
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ ras_ta_param->dgpu_mode = 1;
+
+ ras_ta_param->xcc_mask = adev->gfx.xcc_mask;
+ ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+
+ ras_ta_param->active_umc_mask = adev->umc.active_mask;
+
+ if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode))
+ ras_ta_param->nps_mode = nps_mode;
+
+ return 0;
+}
+
+const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = {
+ .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status,
+ .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param,
+};
+
+static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_psp_config *psp_cfg = &config->psp_cfg;
+
+ psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_umc_config *umc_cfg = &config->umc_cfg;
+
+ umc_cfg->umc_vram_type = adev->gmc.vram_type;
+
+ return 0;
+}
+
+static struct ras_core_context *amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev)
+{
+ struct ras_core_config init_config;
+
+ memset(&init_config, 0, sizeof(init_config));
+
+ init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0);
+ init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
+ init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0);
+ init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+
+ if (init_config.umc_ip_version == IP_VERSION(12, 0, 0) ||
+ init_config.umc_ip_version == IP_VERSION(12, 5, 0))
+ init_config.aca_ip_version = IP_VERSION(1, 0, 0);
+
+ init_config.sys_fn = &amdgpu_ras_sys_fn;
+ init_config.ras_eeprom_supported = true;
+ init_config.poison_supported =
+ amdgpu_ras_is_poison_mode_supported(adev);
+
+ amdgpu_ras_mgr_init_aca_config(adev, &init_config);
+ amdgpu_ras_mgr_init_eeprom_config(adev, &init_config);
+ amdgpu_ras_mgr_init_mp1_config(adev, &init_config);
+ amdgpu_ras_mgr_init_nbio_config(adev, &init_config);
+ amdgpu_ras_mgr_init_psp_config(adev, &init_config);
+ amdgpu_ras_mgr_init_umc_config(adev, &init_config);
+
+ return ras_core_create(&init_config);
+}
+
+static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr;
+ int ret = 0;
+
+ /* Disabled by default */
+ con->uniras_enabled = false;
+
+ /* Enabled only in debug mode */
+ if (adev->debug_enable_ras_aca) {
+ con->uniras_enabled = true;
+ RAS_DEV_INFO(adev, "Debug amdgpu uniras!");
+ }
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL);
+ if (!ras_mgr)
+ return -ENOMEM;
+
+ con->ras_mgr = ras_mgr;
+ ras_mgr->adev = adev;
+
+ ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev);
+ if (!ras_mgr->ras_core) {
+ RAS_DEV_ERR(adev, "Failed to create ras core!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ras_mgr->ras_core->dev = adev;
+
+ amdgpu_ras_process_init(adev);
+ ras_core_sw_init(ras_mgr->ras_core);
+ amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core);
+ return 0;
+
+err:
+ kfree(ras_mgr);
+ return ret;
+}
+
+static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr)
+ return 0;
+
+ amdgpu_ras_process_fini(adev);
+ ras_core_sw_fini(ras_mgr->ras_core);
+ ras_core_destroy(ras_mgr->ras_core);
+ ras_mgr->ras_core = NULL;
+
+ kfree(con->ras_mgr);
+ con->ras_mgr = NULL;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ret = ras_core_hw_init(ras_mgr->ras_core);
+ if (ret) {
+ RAS_DEV_ERR(adev, "Failed to initialize ras core!\n");
+ return ret;
+ }
+
+ ras_mgr->ras_is_ready = true;
+
+ amdgpu_enable_uniras(adev, true);
+
+ RAS_DEV_INFO(adev, "AMDGPU RAS is ready.\n");
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ras_core_hw_fini(ras_mgr->ras_core);
+
+ ras_mgr->ras_is_ready = false;
+
+ return 0;
+}
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev)
+{
+ if (!adev || !adev->psp.ras_context.ras)
+ return NULL;
+
+ return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr;
+}
+
+static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = {
+ .name = "ras_v1_0",
+ .sw_init = amdgpu_ras_mgr_sw_init,
+ .sw_fini = amdgpu_ras_mgr_sw_fini,
+ .hw_init = amdgpu_ras_mgr_hw_init,
+ .hw_fini = amdgpu_ras_mgr_hw_fini,
+};
+
+const struct amdgpu_ip_block_version ras_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_RAS,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ras_v1_0_ip_funcs,
+};
+
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EPERM;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EPERM;
+
+ RAS_DEV_INFO(adev, "%s amdgpu unified ras!",
+ enable ? "Enable" : "Disable");
+ return ras_core_set_status(ras_mgr->ras_core, enable);
+}
+
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return ras_core_is_enabled(ras_mgr->ras_core);
+}
+
+static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready &&
+ ras_core_is_ready(ras_mgr->ras_core))
+ return true;
+
+ return false;
+}
+
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_handle_nbio_irq(ras_mgr->ras_core, data);
+}
+
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+ uint64_t seq_no;
+
+ if (!amdgpu_ras_mgr_is_ready(adev) ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return 0;
+
+ seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type);
+
+ if ((seqno_type == RAS_SEQNO_TYPE_DE) ||
+ (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) {
+ ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no);
+ if (ret)
+ RAS_DEV_WARN(adev, "There are too many ras interrupts!");
+ }
+
+ return seq_no;
+}
+
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ uint64_t seq_no = 0;
+ int ret = 0;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) {
+ if (ras_mgr->ras_core->poison_supported) {
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE);
+ RAS_DEV_INFO(adev,
+ "{%llu} RAS poison is created, no user action is needed.\n",
+ seq_no);
+ }
+
+ ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info);
+ } else if (ras_mgr->ras_core->poison_supported) {
+ ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info);
+ } else {
+ RAS_DEV_WARN(adev,
+ "No RAS interrupt handler for non-UMC block with poison disabled.\n");
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return amdgpu_ras_process_handle_consumption_interrupt(adev, data);
+}
+
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_update_ecc_info(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ con->gpu_reset_flags |= flags;
+ return amdgpu_ras_reset_gpu(adev);
+}
+
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_eeprom_check_safety_watermark(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev,
+ uint32_t *nps_mode)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ uint32_t mode;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EINVAL;
+
+ mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core);
+ if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE)
+ return -EINVAL;
+
+ *nps_mode = mode;
+
+ return 0;
+}
+
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_umc_check_retired_addr(ras_mgr->ras_core, addr);
+}
+
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready)
+ return false;
+
+ return ras_core_gpu_is_rma(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_cmd_ctx *cmd_ctx;
+ uint32_t ctx_buf_size = PAGE_SIZE;
+ int ret;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL);
+ if (!cmd_ctx)
+ return -ENOMEM;
+
+ cmd_ctx->cmd_id = cmd_id;
+
+ memcpy(cmd_ctx->input_buff_raw, input, input_size);
+ cmd_ctx->input_size = input_size;
+ cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx);
+
+ ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx);
+ if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size))
+ memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size);
+
+ kfree(cmd_ctx);
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+ RAS_DEV_ERR(adev, "Invalid ras suspend!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_pre_reset(adev);
+ return 0;
+}
+
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+ RAS_DEV_ERR(adev, "Invalid ras resume!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_post_reset(adev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
new file mode 100644
index 000000000000..8fb7eb4b8f13
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_MGR_H__
+#define __AMDGPU_RAS_MGR_H__
+#include "ras.h"
+#include "amdgpu_ras_process.h"
+
+enum ras_ih_type {
+ RAS_IH_NONE,
+ RAS_IH_FROM_BLOCK_CONTROLLER,
+ RAS_IH_FROM_CONSUMER_CLIENT,
+ RAS_IH_FROM_FATAL_ERROR,
+};
+
+struct ras_ih_info {
+ uint32_t block;
+ union {
+ struct amdgpu_iv_entry iv_entry;
+ struct {
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+ };
+ };
+};
+
+struct amdgpu_ras_mgr {
+ struct amdgpu_device *adev;
+ struct ras_core_context *ras_core;
+ struct delayed_work retire_page_dwork;
+ struct ras_event_manager ras_event_mgr;
+ uint64_t last_poison_consumption_seqno;
+ bool ras_is_ready;
+
+ bool is_paused;
+ struct completion ras_event_done;
+};
+
+extern const struct amdgpu_ip_block_version ras_v1_0_ip_block;
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(
+ struct amdgpu_device *adev);
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable);
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags);
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type);
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode);
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size);
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
new file mode 100644
index 000000000000..79a51b1603ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_smu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core,
+ u32 msg, u32 *count)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ u32 smu_msg;
+ int ret = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ?
+ SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ if (ret)
+ *count = 0;
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t data[2] = {0, 0};
+ uint32_t param;
+ int ret = 0;
+ int i, offset;
+ u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ?
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ offset = reg_idx * 8;
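+ /* Each 64-bit bank register is read back as two 32-bit dwords:
+ * the SMU message argument packs the bank index in the upper 16 bits
+ * and the dword-aligned register byte offset in the lower 16 bits.
+ */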
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc);
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n",
+ reg_idx, offset);
+ break;
+ }
+ }
+ up_read(&adev->reset_domain->sem);
+
+ if (!ret)
+ *val = (uint64_t)data[1] << 32 | data[0];
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = {
+ .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count,
+ .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
new file mode 100644
index 000000000000..71c614ae1ae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_MP1_V13_0_H__
+#define __AMDGPU_RAS_MP1_V13_0_H__
+#include "ras.h"
+
+extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
new file mode 100644
index 000000000000..2783f5875c7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+#include "nbio/nbio_7_9_0_offset.h"
+#include "nbio/nbio_7_9_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function; there is no initialization operation in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the IH cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known BIF ring hardware bug, it has to be disabled, so the process
+ * function will never be invoked. It is left as a dummy.
+ */
+ return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function; there is no initialization operation in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the IH cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known BIF ring hardware bug, it has to be disabled, so the process
+ * function will never be invoked. It is left as a dummy.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+ .set = nbio_v7_9_set_ras_controller_irq_state,
+ .process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_9_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core,
+ bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_9_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = {
+ .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt,
+ .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
new file mode 100644
index 000000000000..272259e9a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RAS_NBIO_V7_9_H__
+#define __AMDGPU_RAS_NBIO_V7_9_H__
+
+extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
new file mode 100644
index 000000000000..5782c007de71
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_process.h"
+
+#define RAS_MGR_RETIRE_PAGE_INTERVAL 100
+#define RAS_EVENT_PROCESS_TIMEOUT 1200
+
+static void ras_process_retire_page_dwork(struct work_struct *work)
+{
+ struct amdgpu_ras_mgr *ras_mgr =
+ container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work);
+ struct amdgpu_device *adev = ras_mgr->adev;
+ int ret;
+
+ if (amdgpu_ras_is_rma(adev))
+ return;
+
+ /* If gpu reset is ongoing, delay retiring the bad pages */
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3));
+ return;
+ }
+
+ ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL);
+ if (!ret)
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL));
+}
+
+int amdgpu_ras_process_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ init_completion(&ras_mgr->ras_event_done);
+
+ INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork);
+
+ return 0;
+}
+
+int amdgpu_ras_process_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ /* Save all cached bad pages to eeprom */
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ cancel_delayed_work_sync(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr->ras_core)
+ return -EINVAL;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true);
+}
+
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data)
+{
+ amdgpu_ras_set_fed(adev, true);
+ return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET);
+}
+
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ struct ras_event_req req;
+ uint64_t seqno;
+
+ if (!ih_info)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.block = ih_info->block;
+ req.data = ih_info->data;
+ req.pasid = ih_info->pasid;
+ req.pasid_fn = ih_info->pasid_fn;
+ req.reset = ih_info->reset;
+
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+
+ /* When the ACA registers cannot be read from the FW, the poison
+ * consumption seqno is never popped from the FIFO, so check
+ * whether the current seqno is still the previous one.
+ */
+ if (seqno == ras_mgr->last_poison_consumption_seqno) {
+ /* Pop and discard the previous seqno */
+ ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+ }
+ ras_mgr->last_poison_consumption_seqno = seqno;
+ req.seqno = seqno;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false);
+}
+
+int amdgpu_ras_process_begin(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr->is_paused)
+ return -EAGAIN;
+
+ reinit_completion(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_end(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ complete(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ long rc;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = true;
+
+ /* Wait for RAS event processing to complete */
+ rc = wait_for_completion_interruptible_timeout(&ras_mgr->ras_event_done,
+ msecs_to_jiffies(RAS_EVENT_PROCESS_TIMEOUT));
+ if (rc <= 0)
+ RAS_DEV_WARN(adev, "Wait for RAS event processing %s\n",
+ rc ? "was interrupted" : "timed out");
+
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = false;
+
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
new file mode 100644
index 000000000000..d55cdaeac441
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_PROCESS_H__
+#define __AMDGPU_RAS_PROCESS_H__
+#include "ras_process.h"
+#include "amdgpu_ras_mgr.h"
+
+enum ras_ih_type;
+int amdgpu_ras_process_init(struct amdgpu_device *adev);
+int amdgpu_ras_process_fini(struct amdgpu_device *adev);
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_begin(struct amdgpu_device *adev);
+int amdgpu_ras_process_end(struct amdgpu_device *adev);
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
new file mode 100644
index 000000000000..45ed8c3b5563
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+
+static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+ uint64_t seq_no;
+
+ ret = amdgpu_ras_global_ras_isr(adev);
+ if (ret)
+ return ret;
+
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE);
+ RAS_DEV_INFO(adev,
+ "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n",
+ seq_no);
+
+ return amdgpu_ras_process_handle_unexpected_interrupt(adev, data);
+}
+
+static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core,
+ void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_event_req *req = (struct ras_event_req *)data;
+ pasid_notify pasid_fn;
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->pasid_fn) {
+ pasid_fn = (pasid_notify)req->pasid_fn;
+ pasid_fn(adev, req->pasid, req->data);
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct ras_event_state *event_state;
+ struct amdgpu_hive_info *hive;
+ enum ras_event_type event_type;
+ uint64_t seq_no;
+
+ if (!ras_mgr || !seqno ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return -EINVAL;
+
+ switch (seqno_type) {
+ case RAS_SEQNO_TYPE_UE:
+ event_type = RAS_EVENT_TYPE_FATAL;
+ break;
+ case RAS_SEQNO_TYPE_CE:
+ case RAS_SEQNO_TYPE_DE:
+ event_type = RAS_EVENT_TYPE_POISON_CREATION;
+ break;
+ case RAS_SEQNO_TYPE_POISON_CONSUMPTION:
+ event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
+ break;
+ default:
+ event_type = RAS_EVENT_TYPE_INVALID;
+ break;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+ event_state = &event_mgr->event_state[event_type];
+ if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) {
+ seq_no = event_state->last_seqno;
+ } else {
+ seq_no = atomic64_inc_return(&event_mgr->seqno);
+ event_state->last_seqno = seq_no;
+ atomic64_inc(&event_state->count);
+ }
+ amdgpu_put_xgmi_hive(hive);
+
+ *seqno = seq_no;
+ return 0;
+
+}
+
+static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev);
+ int ret = 0;
+
+ switch (event_id) {
+ case RAS_EVENT_ID__BAD_PAGE_DETECTED:
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ break;
+ case RAS_EVENT_ID__POISON_CONSUMPTION:
+ amdgpu_ras_sys_poison_consumption_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__RESERVE_BAD_PAGE:
+ ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data);
+ break;
+ case RAS_EVENT_ID__FATAL_ERROR_DETECTED:
+ ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM:
+ ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP:
+ ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__DEVICE_RMA:
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL);
+ ret = amdgpu_dpm_send_rma_reason(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RESET_GPU:
+ ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN:
+ ret = amdgpu_ras_process_begin(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_END:
+ ret = amdgpu_ras_process_end(ras_core->dev);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Invalid ras notify event:%d\n", event_id);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ return ktime_get_real_seconds();
+}
+
+static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core,
+ uint32_t *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t gpu_status = 0;
+
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
+ gpu_status |= RAS_GPU_STATUS__IN_RESET;
+
+ if (amdgpu_sriov_vf(adev))
+ gpu_status |= RAS_GPU_STATUS__IS_VF;
+
+ *status = gpu_status;
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+
+ dev_info->device_id = adev->pdev->device;
+ dev_info->vendor_id = adev->pdev->vendor;
+ dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core,
+ bool down, bool try)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret = 0;
+
+ if (down && try)
+ ret = down_read_trylock(&adev->reset_domain->sem);
+ else if (down)
+ down_read(&adev->reset_domain->sem);
+ else
+ up_read(&adev->reset_domain->sem);
+
+ return ret;
+}
+
+static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct psp_context *psp = &adev->psp;
+ struct psp_ring *psp_ring;
+ struct ta_mem_context *mem_ctx;
+
+ if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) {
+ psp_ring = &psp->km_ring;
+ gpu_mem->mem_bo = adev->firmware.rbuf;
+ gpu_mem->mem_size = psp_ring->ring_size;
+ gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr;
+ gpu_mem->mem_cpu_addr = psp_ring->ring_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) {
+ gpu_mem->mem_bo = psp->cmd_buf_bo;
+ gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->cmd_buf_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) {
+ gpu_mem->mem_bo = psp->fence_buf_bo;
+ gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fence_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) {
+ gpu_mem->mem_bo = psp->fw_pri_bo;
+ gpu_mem->mem_size = PSP_1_MEG;
+ gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fw_pri_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) {
+ mem_ctx = &psp->ras_context.context.mem_context;
+ gpu_mem->mem_bo = mem_ctx->shared_bo;
+ gpu_mem->mem_size = mem_ctx->shared_mem_size;
+ gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr;
+ gpu_mem->mem_cpu_addr = mem_ctx->shared_buf;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!gpu_mem->mem_bo || !gpu_mem->mem_size ||
+ !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) {
+ RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+
+ return 0;
+}
+
+const struct ras_sys_func amdgpu_ras_sys_fn = {
+ .ras_notifier = amdgpu_ras_sys_event_notifier,
+ .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp,
+ .gen_seqno = amdgpu_ras_sys_gen_seqno,
+ .check_gpu_status = amdgpu_ras_sys_check_gpu_status,
+ .get_device_system_info = amdgpu_ras_sys_get_device_system_info,
+ .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock,
+ .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt,
+ .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem,
+ .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
new file mode 100644
index 000000000000..8156531a7b63
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_SYS_H__
+#define __RAS_SYS_H__
+#include <linux/stdarg.h>
+#include <linux/printk.h>
+#include <linux/dev_printk.h>
+#include <linux/mempool.h>
+#include "amdgpu.h"
+
+#define RAS_DEV_ERR(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_ERR fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_WARN(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_WARNING fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_INFO(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_INFO fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_DBG(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ } while (0)
+
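+/* Example usage of the helpers above (hypothetical call site):
+ *   RAS_DEV_INFO(adev, "bank %u: status 0x%llx\n", bank, status);
+ * routes through dev_info() when a device cookie is available and
+ * falls back to printk() when it is NULL.
+ */
+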
+#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
+
+#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ 0, ip##_HWIP, inst); \
+})
+
+#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+ value, 0, ip##_HWIP, inst); \
+})
+
+/* GET_INST returns the physical instance corresponding to a logical instance */
+#define RAS_GET_INST(dev, ip, inst) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ adev->ip_map.logical_to_dev_inst ? \
+ adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \
+})
+
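+/* GET_MASK converts a logical instance mask to the corresponding physical instance mask */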
+#define RAS_GET_MASK(dev, ip, mask) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ (adev->ip_map.logical_to_dev_mask ? \
+ adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \
+})
+
+static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter)
+{
+ return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index);
+}
+
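+/* Wrapper around wait_event_interruptible_timeout() for callers that only hold an opaque wait-queue pointer */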
+static inline long ras_wait_event_interruptible_timeout(void *wq_head,
+ int (*condition)(void *param), void *param, unsigned int timeout)
+{
+ return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head,
+ condition(param), timeout);
+}
+
+extern const struct ras_sys_func amdgpu_ras_sys_fn;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
new file mode 100644
index 000000000000..e826a1f86424
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
@@ -0,0 +1,44 @@
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+RAS_CORE_FILES = ras_core.o \
+ ras_mp1.o \
+ ras_mp1_v13_0.o \
+ ras_aca.o \
+ ras_aca_v1_0.o \
+ ras_eeprom.o \
+ ras_umc.o \
+ ras_umc_v12_0.o \
+ ras_cmd.o \
+ ras_gfx.o \
+ ras_gfx_v9_0.o \
+		RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %u sockets per hive, %u AIDs per socket, %u XCDs per AID\n",
+ ras_nbio.o \
+ ras_nbio_v7_9.o \
+ ras_log_ring.o \
+ ras_cper.o \
+ ras_psp.o \
+ ras_psp_v13_0.o
+
+RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_CORE)
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h
new file mode 100644
index 000000000000..3396b2e0949d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+#include "ras_sys.h"
+#include "ras_umc.h"
+#include "ras_aca.h"
+#include "ras_eeprom.h"
+#include "ras_core_status.h"
+#include "ras_process.h"
+#include "ras_gfx.h"
+#include "ras_cmd.h"
+#include "ras_nbio.h"
+#include "ras_mp1.h"
+#include "ras_psp.h"
+#include "ras_log_ring.h"
+
+#define RAS_HW_ERR "[Hardware Error]: "
+
+#define RAS_GPU_PAGE_SHIFT 12
+#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT)
+#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT)
+
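+/* GPU reset request codes: RAS_CORE_RESET_GPU flags that a reset is required, the low bits encode the cause */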
+#define RAS_CORE_RESET_GPU 0x10000
+
+#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001)
+#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002)
+#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004)
+
+enum ras_block_id {
+ RAS_BLOCK_ID__UMC = 0,
+ RAS_BLOCK_ID__SDMA,
+ RAS_BLOCK_ID__GFX,
+ RAS_BLOCK_ID__MMHUB,
+ RAS_BLOCK_ID__ATHUB,
+ RAS_BLOCK_ID__PCIE_BIF,
+ RAS_BLOCK_ID__HDP,
+ RAS_BLOCK_ID__XGMI_WAFL,
+ RAS_BLOCK_ID__DF,
+ RAS_BLOCK_ID__SMN,
+ RAS_BLOCK_ID__SEM,
+ RAS_BLOCK_ID__MP0,
+ RAS_BLOCK_ID__MP1,
+ RAS_BLOCK_ID__FUSE,
+ RAS_BLOCK_ID__MCA,
+ RAS_BLOCK_ID__VCN,
+ RAS_BLOCK_ID__JPEG,
+ RAS_BLOCK_ID__IH,
+ RAS_BLOCK_ID__MPIO,
+
+ RAS_BLOCK_ID__LAST
+};
+
+enum ras_ecc_err_type {
+ RAS_ECC_ERR__NONE = 0,
+ RAS_ECC_ERR__PARITY = 1,
+ RAS_ECC_ERR__SINGLE_CORRECTABLE = 2,
+ RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4,
+ RAS_ECC_ERR__POISON = 8,
+};
+
+enum ras_err_type {
+ RAS_ERR_TYPE__UE = 0,
+ RAS_ERR_TYPE__CE,
+ RAS_ERR_TYPE__DE,
+ RAS_ERR_TYPE__LAST
+};
+
+enum ras_seqno_type {
+ RAS_SEQNO_TYPE_INVALID = 0,
+ RAS_SEQNO_TYPE_UE,
+ RAS_SEQNO_TYPE_CE,
+ RAS_SEQNO_TYPE_DE,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION,
+ RAS_SEQNO_TYPE_COUNT_MAX,
+};
+
+enum ras_seqno_fifo {
+ SEQNO_FIFO_INVALID = 0,
+ SEQNO_FIFO_POISON_CREATION,
+ SEQNO_FIFO_POISON_CONSUMPTION,
+ SEQNO_FIFO_COUNT_MAX
+};
+
+enum ras_notify_event {
+ RAS_EVENT_ID__NONE,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED,
+ RAS_EVENT_ID__POISON_CONSUMPTION,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE,
+ RAS_EVENT_ID__DEVICE_RMA,
+ RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED,
+ RAS_EVENT_ID__RESET_GPU,
+ RAS_EVENT_ID__RESET_VF,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END,
+};
+
+enum ras_gpu_status {
+ RAS_GPU_STATUS__NOT_READY = 0,
+ RAS_GPU_STATUS__READY = 0x1,
+ RAS_GPU_STATUS__IN_RESET = 0x2,
+ RAS_GPU_STATUS__IS_RMA = 0x4,
+ RAS_GPU_STATUS__IS_VF = 0x8,
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+struct ras_umc;
+struct ras_aca;
+struct ras_process;
+struct ras_nbio;
+struct ras_log_ring;
+struct ras_psp;
+
+struct ras_mp1_sys_func {
+ int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core,
+ u32 msg, u32 *count);
+ int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_eeprom_sys_func {
+ int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read);
+ int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio_sys_func {
+ int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+ int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+};
+
+struct ras_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ long tm_year;
+};
+
+struct device_system_info {
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t socket_id;
+};
+
+enum gpu_mem_type {
+ GPU_MEM_TYPE_DEFAULT,
+ GPU_MEM_TYPE_RAS_PSP_RING,
+ GPU_MEM_TYPE_RAS_PSP_CMD,
+ GPU_MEM_TYPE_RAS_PSP_FENCE,
+ GPU_MEM_TYPE_RAS_TA_FW,
+ GPU_MEM_TYPE_RAS_TA_CMD,
+};
+
+struct ras_psp_sys_func {
+ int (*get_ras_psp_system_status)(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status);
+ int (*get_ras_ta_init_param)(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param);
+};
+
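+/* Callback table the driver supplies so the RAS core can reach system services: reset locking, sequence numbers, GPU memory and event notification */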
+struct ras_sys_func {
+ int (*gpu_reset_lock)(struct ras_core_context *ras_core,
+ bool down, bool try);
+ int (*check_gpu_status)(struct ras_core_context *ras_core,
+ uint32_t *status);
+ int (*gen_seqno)(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno);
+ int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data);
+ int (*ras_notifier)(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+ u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core);
+ int (*get_device_system_info)(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+ bool (*detect_ras_interrupt)(struct ras_core_context *ras_core);
+ int (*get_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+ int (*put_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+};
+
+struct ras_ecc_count {
+ uint64_t new_ce_count;
+ uint64_t total_ce_count;
+ uint64_t new_ue_count;
+ uint64_t total_ue_count;
+ uint64_t new_de_count;
+ uint64_t total_de_count;
+};
+
+struct ras_bank_ecc {
+ uint32_t nps;
+ uint64_t seq_no;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct ras_bank_ecc_node {
+ struct list_head node;
+ struct ras_bank_ecc ecc;
+};
+
+struct ras_aca_config {
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+};
+
+struct ras_mp1_config {
+ const struct ras_mp1_sys_func *mp1_sys_fn;
+};
+
+struct ras_nbio_config {
+ const struct ras_nbio_sys_func *nbio_sys_fn;
+};
+
+struct ras_psp_config {
+ const struct ras_psp_sys_func *psp_sys_fn;
+};
+
+struct ras_umc_config {
+ uint32_t umc_vram_type;
+};
+
+struct ras_eeprom_config {
+ const struct ras_eeprom_sys_func *eeprom_sys_fn;
+ int eeprom_record_threshold_config;
+ uint32_t eeprom_record_threshold_count;
+ void *eeprom_i2c_adapter;
+ u32 eeprom_i2c_addr;
+ u32 eeprom_i2c_port;
+ u16 max_i2c_read_len;
+ u16 max_i2c_write_len;
+};
+
+struct ras_core_config {
+ u32 aca_ip_version;
+ u32 umc_ip_version;
+ u32 mp1_ip_version;
+ u32 gfx_ip_version;
+ u32 nbio_ip_version;
+ u32 psp_ip_version;
+
+ bool poison_supported;
+ bool ras_eeprom_supported;
+ const struct ras_sys_func *sys_fn;
+
+ struct ras_aca_config aca_cfg;
+ struct ras_mp1_config mp1_cfg;
+ struct ras_nbio_config nbio_cfg;
+ struct ras_psp_config psp_cfg;
+ struct ras_eeprom_config eeprom_cfg;
+ struct ras_umc_config umc_cfg;
+};
+
+struct ras_core_context {
+ void *dev;
+ struct ras_core_config *config;
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+ int max_ue_banks_per_query;
+ int max_ce_banks_per_query;
+ struct ras_aca ras_aca;
+
+ bool ras_eeprom_supported;
+ struct ras_eeprom_control ras_eeprom;
+
+ struct ras_psp ras_psp;
+ struct ras_umc ras_umc;
+ struct ras_nbio ras_nbio;
+ struct ras_gfx ras_gfx;
+ struct ras_mp1 ras_mp1;
+ struct ras_process ras_proc;
+ struct ras_cmd_mgr ras_cmd;
+ struct ras_log_ring ras_log_ring;
+
+ const struct ras_sys_func *sys_fn;
+
+ /* is poison mode supported */
+ bool poison_supported;
+
+ bool is_rma;
+ bool is_initialized;
+
+ struct kfifo de_seqno_fifo;
+ struct kfifo consumption_seqno_fifo;
+ spinlock_t seqno_lock;
+
+ bool ras_core_enabled;
+};
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config);
+void ras_core_destroy(struct ras_core_context *ras_core);
+int ras_core_sw_init(struct ras_core_context *ras_core);
+int ras_core_sw_fini(struct ras_core_context *ras_core);
+int ras_core_hw_init(struct ras_core_context *ras_core);
+int ras_core_hw_fini(struct ras_core_context *ras_core);
+bool ras_core_is_ready(struct ras_core_context *ras_core);
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type);
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop);
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno);
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core);
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count);
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core);
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data);
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core);
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core);
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id);
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm);
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable);
+bool ras_core_is_enabled(struct ras_core_context *ras_core);
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core);
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core);
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core);
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core);
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
new file mode 100644
index 000000000000..e433c70d2989
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_aca_v1_0.h"
+#include "ras_mp1_v13_0.h"
+
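+/*
+ * ue_updated_mark flags: a fatal error sets ACA_MARK_FATAL_FLAG; the first UE
+ * bank query after the fatal also sets ACA_MARK_UE_READ_FLAG so duplicate UE
+ * bank reads are skipped until ras_aca_clear_fatal_flag() resets both flags.
+ */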
+#define ACA_MARK_FATAL_FLAG 0x100
+#define ACA_MARK_UE_READ_FLAG 0x1
+
+#define blk_name(block_id) ras_core_get_ras_block_name(block_id)
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX__CTL},
+ {"STATUS", ACA_REG_IDX__STATUS},
+ {"ADDR", ACA_REG_IDX__ADDR},
+ {"MISC", ACA_REG_IDX__MISC0},
+ {"CONFIG", ACA_REG_IDX__CONFG},
+ {"IPID", ACA_REG_IDX__IPID},
+ {"SYND", ACA_REG_IDX__SYND},
+ {"DESTAT", ACA_REG_IDX__DESTAT},
+ {"DEADDR", ACA_REG_IDX__DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK},
+};
+
+
+static void aca_report_ecc_info(struct ras_core_context *ras_core,
+ u64 seq_no, u32 blk, u32 skt, u32 aid,
+ struct aca_aid_ecc *aid_ecc,
+ struct aca_bank_ecc *new_ecc)
+{
+ struct aca_ecc_count ecc_count = {0};
+
+ ecc_count.new_ue_count = new_ecc->ue_count;
+ ecc_count.new_de_count = new_ecc->de_count;
+ ecc_count.new_ce_count = new_ecc->ce_count;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ struct aca_ecc_count *xcd_ecc;
+ int xcd_id;
+
+ for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) {
+ xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err;
+ ecc_count.total_ue_count += xcd_ecc->total_ue_count;
+ ecc_count.total_de_count += xcd_ecc->total_de_count;
+ ecc_count.total_ce_count += xcd_ecc->total_ce_count;
+ }
+ } else {
+ ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count;
+ ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count;
+ ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count;
+ }
+
+ if (ecc_count.new_ue_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk));
+ }
+
+ if (ecc_count.new_de_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ }
+
+ if (ecc_count.new_ce_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk));
+ }
+}
+
+static void aca_bank_log(struct ras_core_context *ras_core,
+ int idx, int total, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ int i;
+
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n",
+ bank->seq_no);
+	/* plus 1 for output format, e.g. ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ bank->seq_no, idx + 1, total,
+ aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static void aca_log_bank_data(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc,
+ struct ras_log_batch_tag *batch)
+{
+ if (bank_ecc->ue_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch);
+ else if (bank_ecc->de_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch);
+ else
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch);
+}
+
+static int aca_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ return ras_mp1_get_bank_count(ras_core, type, count);
+}
+
+static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank)
+{
+ const struct aca_bank_hw_ops *bank_ops;
+
+ if (!aca_blk->blk_info)
+ return false;
+
+ bank_ops = &aca_blk->blk_info->bank_ops;
+ if (!bank_ops->bank_match)
+ return false;
+
+ return bank_ops->bank_match(aca_blk, bank);
+}
+
+static int aca_parse_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk,
+ struct aca_bank_reg *bank,
+ struct aca_bank_ecc *ecc)
+{
+ const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops;
+
+ if (!bank_ops || !bank_ops->bank_parse)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc);
+}
+
+static int aca_check_block_ecc_info(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_ecc_info *info)
+{
+ if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) {
+ RAS_DEV_ERR(ras_core->dev,
+			"Socket id (%d) exceeds the configured maximum (%u)!\n",
+ info->socket_id, aca_blk->ecc.socket_num_per_hive);
+ return -ENODATA;
+ }
+
+ if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) {
+ RAS_DEV_ERR(ras_core->dev,
+			"Die id (%d) exceeds the configured maximum (%u)!\n",
+ info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num);
+ return -ENODATA;
+ }
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) &&
+ (info->xcd_id >=
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) {
+ RAS_DEV_ERR(ras_core->dev,
+			"Xcd id (%d) exceeds the configured maximum (%u)!\n",
+ info->xcd_id,
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int aca_log_bad_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ struct aca_ecc_info *info;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+ int ret;
+
+ info = &bank_ecc->bank_info;
+
+ ret = aca_check_block_ecc_info(ras_core, aca_blk, info);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id];
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)
+ ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err;
+ else
+ ecc_err = &aid_ecc->ecc_err;
+
+ ecc_err->new_ce_count += bank_ecc->ce_count;
+ ecc_err->total_ce_count += bank_ecc->ce_count;
+ ecc_err->new_ue_count += bank_ecc->ue_count;
+ ecc_err->total_ue_count += bank_ecc->ue_count;
+ ecc_err->new_de_count += bank_ecc->de_count;
+ ecc_err->total_de_count += bank_ecc->de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) &&
+ bank_ecc->de_count) {
+ struct ras_bank_ecc ras_ecc = {0};
+
+ ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core);
+ ras_ecc.addr = bank_ecc->bank_info.addr;
+ ras_ecc.ipid = bank_ecc->bank_info.ipid;
+ ras_ecc.status = bank_ecc->bank_info.status;
+ ras_ecc.seq_no = bank->seq_no;
+
+ if (ras_core_gpu_in_reset(ras_core))
+ ras_umc_log_bad_bank_pending(ras_core, &ras_ecc);
+ else
+ ras_umc_log_bad_bank(ras_core, &ras_ecc);
+ }
+
+ aca_report_ecc_info(ras_core,
+ bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id,
+ &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc);
+
+ return 0;
+}
+
+static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank)
+{
+ int i = 0;
+
+ for (i = 0; i < RAS_BLOCK_ID__LAST; i++)
+ if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank))
+ return &ras_core->ras_aca.aca_blk[i];
+
+ return NULL;
+}
+
+static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type,
+ int idx, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ int i, ret, reg_cnt;
+
+ reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < reg_cnt; i++) {
+ ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core,
+ enum ras_err_type err_type, struct aca_block *aca_blk,
+ struct aca_bank_ecc *bank_ecc)
+{
+ uint64_t seq_no = 0;
+
+ if (bank_ecc->de_count) {
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC)
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true);
+ else
+ seq_no = ras_core_get_seqno(ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ } else if (bank_ecc->ue_count) {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true);
+ } else {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true);
+ }
+
+ return seq_no;
+}
+
+static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core,
+ u32 ecc_type)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (ecc_type != RAS_ERR_TYPE__UE)
+ return false;
+
+ if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) {
+ if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)
+ return true;
+
+ aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG;
+ }
+
+ return false;
+}
+
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG;
+}
+
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) &&
+ (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG))
+ aca->ue_updated_mark = 0;
+}
+
+static int aca_banks_update(struct ras_core_context *ras_core,
+ u32 ecc_type, void *data)
+{
+ struct aca_bank_reg bank;
+ struct aca_block *aca_blk;
+ struct aca_bank_ecc bank_ecc;
+ struct ras_log_batch_tag *batch_tag = NULL;
+ u32 count = 0;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ras_core->ras_aca.bank_op_lock);
+
+ if (aca_dup_update_ue_in_fatal(ras_core, ecc_type))
+ goto out;
+
+ ret = aca_get_bank_count(ras_core, ecc_type, &count);
+ if (ret)
+ goto out;
+
+ if (!count)
+ goto out;
+
+ batch_tag = ras_log_ring_create_batch_tag(ras_core);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = aca_dump_bank(ras_core, ecc_type, i, &bank);
+ if (ret)
+ break;
+
+ bank.ecc_type = ecc_type;
+
+ memset(&bank_ecc, 0, sizeof(bank_ecc));
+ aca_blk = aca_get_bank_aca_block(ras_core, &bank);
+ if (aca_blk)
+ ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc);
+
+ aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag);
+ aca_bank_log(ras_core, i, count, &bank, &bank_ecc);
+
+ if (!ret && aca_blk)
+ ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ if (ret)
+ break;
+ }
+ ras_log_ring_destroy_batch_tag(ras_core, batch_tag);
+
+out:
+ mutex_unlock(&ras_core->ras_aca.bank_op_lock);
+ return ret;
+}
+
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data)
+{
+	/* Accumulate the ACA banks into the per-block error cache first */
+ return aca_banks_update(ras_core, type, data);
+}
+
+static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk)
+{
+ return &ras_core->ras_aca.aca_blk[blk];
+}
+
+static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ struct aca_aid_ecc *aid_ecc;
+ int skt, aid, xcd;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++)
+ memset(&aid_ecc->xcd.xcd[xcd],
+ 0, sizeof(struct aca_xcd_ecc));
+ } else {
+ memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err));
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core)
+{
+ enum ras_block_id blk;
+	int ret = 0;
+
+ for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) {
+ ret = ras_aca_clear_block_ecc_count(ras_core, blk);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) {
+ ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ } else {
+ ecc_err = &aid_ecc->ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, u32 skt, u32 aid, u32 xcd,
+ struct aca_ecc_count *ecc_count)
+{
+ struct aca_block *aca_blk;
+ struct aca_ecc_count *ecc_err;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ if (blk == RAS_BLOCK_ID__GFX)
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err;
+ else
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err;
+
+ ecc_count->new_ce_count = ecc_err->new_ce_count;
+ ecc_count->total_ce_count = ecc_err->total_ce_count;
+ ecc_count->new_ue_count = ecc_err->new_ue_count;
+ ecc_count->total_ue_count = ecc_err->total_ue_count;
+ ecc_count->new_de_count = ecc_err->new_de_count;
+ ecc_count->total_de_count = ecc_err->total_de_count;
+
+ return 0;
+}
+
+static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src)
+{
+ des->new_ce_count += src->new_ce_count;
+ des->total_ce_count += src->total_ce_count;
+ des->new_ue_count += src->new_ue_count;
+ des->total_ue_count += src->total_ue_count;
+ des->new_de_count += src->new_de_count;
+ des->total_de_count += src->total_de_count;
+}
+
+static const struct ras_aca_ip_func *aca_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(1, 0, 0):
+ return &ras_aca_func_v1_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+			"ACA ip version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, void *data)
+{
+ struct ras_ecc_count *err_data = (struct ras_ecc_count *)data;
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count ecc_xcd;
+ struct aca_ecc_count ecc_aid;
+ struct aca_ecc_count ecc;
+
+ if (blk >= RAS_BLOCK_ID__LAST)
+ return -EINVAL;
+
+ if (!err_data)
+ return -EINVAL;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ memset(&ecc, 0, sizeof(ecc));
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ for (xcd = 0;
+ xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num;
+ xcd++) {
+ memset(&ecc_xcd, 0, sizeof(ecc_xcd));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, xcd, &ecc_xcd))
+ continue;
+ _add_ecc_count(&ecc_aid, &ecc_xcd);
+ }
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ } else {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, 0, &ecc_aid))
+ continue;
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ }
+
+ err_data->new_ce_count = ecc.new_ce_count;
+ err_data->total_ce_count = ecc.total_ce_count;
+ err_data->new_ue_count = ecc.new_ue_count;
+ err_data->total_ue_count = ecc.total_ue_count;
+ err_data->new_de_count = ecc.new_de_count;
+ err_data->total_de_count = ecc.total_de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg;
+ struct aca_block *aca_blk;
+ uint32_t socket_num_per_hive;
+ uint32_t aid_num_per_socket;
+ uint32_t xcd_num_per_aid;
+ int blk, skt, aid;
+
+ socket_num_per_hive = aca_cfg->socket_num_per_hive;
+ aid_num_per_socket = aca_cfg->aid_num_per_socket;
+ xcd_num_per_aid = aca_cfg->xcd_num_per_aid;
+
+ if (!xcd_num_per_aid || !aid_num_per_socket ||
+ (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) ||
+ (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) ||
+ (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n",
+ socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid);
+ return -EINVAL;
+ }
+
+ memset(ras_aca, 0, sizeof(*ras_aca));
+
+ for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) {
+ aca_blk = &ras_aca->aca_blk[blk];
+ aca_blk->ecc.socket_num_per_hive = socket_num_per_hive;
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++)
+ aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num =
+ xcd_num_per_aid;
+ }
+ }
+ }
+
+ mutex_init(&ras_aca->aca_lock);
+ mutex_init(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ mutex_destroy(&ras_aca->aca_lock);
+ mutex_destroy(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct aca_block *aca_blk;
+ const struct ras_aca_ip_func *ip_func;
+ int i;
+
+ ras_aca->aca_ip_version = ras_core->config->aca_ip_version;
+ ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version);
+ if (!ip_func)
+ return -EINVAL;
+
+ for (i = 0; i < ip_func->block_num; i++) {
+ aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id];
+ aca_blk->blk_info = ip_func->block_info[i];
+ }
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
+
+int ras_aca_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
new file mode 100644
index 000000000000..f61b02a5f0fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_H__
+#define __RAS_ACA_H__
+#include "ras.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+#define MAX_ACA_RAS_BLOCK 20
+
+#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE)
+#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE)
+#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE)
+
+enum ras_aca_reg_idx {
+ ACA_REG_IDX__CTL = 0,
+ ACA_REG_IDX__STATUS = 1,
+ ACA_REG_IDX__ADDR = 2,
+ ACA_REG_IDX__MISC0 = 3,
+ ACA_REG_IDX__CONFG = 4,
+ ACA_REG_IDX__IPID = 5,
+ ACA_REG_IDX__SYND = 6,
+ ACA_REG_IDX__DESTAT = 8,
+ ACA_REG_IDX__DEADDR = 9,
+ ACA_REG_IDX__CTL_MASK = 10,
+ ACA_REG_MAX_COUNT = 16,
+};
+
+struct ras_core_context;
+struct aca_block;
+
+struct aca_bank_reg {
+ u32 ecc_type;
+ u64 seq_no;
+ u64 regs[ACA_REG_MAX_COUNT];
+};
+
+enum aca_ecc_hwip {
+ ACA_ECC_HWIP__UNKNOWN = -1,
+ ACA_ECC_HWIP__PSP = 0,
+ ACA_ECC_HWIP__UMC,
+ ACA_ECC_HWIP__SMU,
+ ACA_ECC_HWIP__PCS_XGMI,
+ ACA_ECC_HWIP_COUNT,
+};
+
+struct aca_ecc_info {
+ int die_id;
+ int socket_id;
+ int xcd_id;
+ int hwid;
+ int mcatype;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct aca_bank_ecc {
+ struct aca_ecc_info bank_info;
+ u32 ce_count;
+ u32 ue_count;
+ u32 de_count;
+};
+
+struct aca_ecc_count {
+ u32 new_ce_count;
+ u32 total_ce_count;
+ u32 new_ue_count;
+ u32 total_ue_count;
+ u32 new_de_count;
+ u32 total_de_count;
+};
+
+struct aca_xcd_ecc {
+ struct aca_ecc_count ecc_err;
+};
+
+struct aca_aid_ecc {
+ union {
+ struct aca_xcd {
+ struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID];
+ u32 xcd_num;
+ } xcd;
+ struct aca_ecc_count ecc_err;
+ };
+};
+
+struct aca_socket_ecc {
+ struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET];
+ u32 aid_num;
+};
+
+struct aca_block_ecc {
+ struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE];
+ u32 socket_num_per_hive;
+};
+
+struct aca_bank_hw_ops {
+ bool (*bank_match)(struct aca_block *ras_blk, void *data);
+ int (*bank_parse)(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, void *data, void *buf);
+};
+
+struct aca_block_info {
+ char name[32];
+ u32 ras_block_id;
+ enum aca_ecc_hwip hwip;
+ struct aca_bank_hw_ops bank_ops;
+ u32 mask;
+};
+
+struct aca_block {
+ const struct aca_block_info *blk_info;
+ struct aca_block_ecc ecc;
+};
+
+struct ras_aca_ip_func {
+ uint32_t block_num;
+ const struct aca_block_info **block_info;
+};
+
+struct ras_aca {
+ uint32_t aca_ip_version;
+ const struct ras_aca_ip_func *ip_func;
+ struct mutex aca_lock;
+ struct mutex bank_op_lock;
+ struct aca_block aca_blk[MAX_ACA_RAS_BLOCK];
+ uint32_t ue_updated_mark;
+};
+
+int ras_aca_sw_init(struct ras_core_context *ras_core);
+int ras_aca_sw_fini(struct ras_core_context *ras_core);
+int ras_aca_hw_init(struct ras_core_context *ras_core);
+int ras_aca_hw_fini(struct ras_core_context *ras_core);
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data);
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk);
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core);
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data);
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core);
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
new file mode 100644
index 000000000000..29df98948703
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_core_status.h"
+#include "ras_aca_v1_0.h"
+
+struct ras_aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = {
+ [ACA_ECC_HWIP__SMU] = {0x01, 0x01},
+ [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00},
+ [ACA_ECC_HWIP__UMC] = {0x96, 0x00},
+};
+
+static int aca_decode_bank_info(struct aca_block *aca_blk,
+ struct aca_bank_reg *bank, struct aca_ecc_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ info->hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ info->mcatype = ACA_REG_IPID_MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceId[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid);
+ instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
+
+ if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) &&
+ (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX))
+ info->xcd_id =
+ ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 0 : 1;
+
+ return 0;
+}
+
+static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type)
+{
+ struct ras_aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || (type == ACA_ECC_HWIP__UNKNOWN))
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ mcatype = ACA_REG_IPID_MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_match_bank_default(struct aca_block *aca_blk, void *data)
+{
+ return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip);
+}
+
+static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ u32 instlo;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+	/* CODE_SDMA0 - CODE_SDMA4, see the SMU driver interface header file */
+	static const int sdma_err_codes[] = { 33, 34, 35, 36 };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check SDMA error codes */
+ for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) {
+ if (errcode == sdma_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+	/* Error codes below are taken from the SMU driver interface header file */
+	static const int mmhub_err_codes[] = {
+ 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+ 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+ };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check MMHUB error codes */
+ for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) {
+ if (errcode == mmhub_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ return (ras_core->poison_supported &&
+ ACA_REG_STATUS_VAL(mc_umc_status) &&
+ ACA_REG_STATUS_DEFERRED(mc_umc_status));
+}
+
+static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_PCC(mc_umc_status) ||
+ ACA_REG_STATUS_UC(mc_umc_status) ||
+ ACA_REG_STATUS_TCC(mc_umc_status)));
+}
+
+static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_CECC(mc_umc_status) ||
+ (ACA_REG_STATUS_UECC(mc_umc_status) &&
+ ACA_REG_STATUS_UC(mc_umc_status) == 0) ||
+ /* Identify data parity error in replay mode */
+ ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 ||
+ ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) &&
+ !(aca_check_umc_ue(ras_core, mc_umc_status)))));
+}
+
+static int aca_parse_umc_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk, void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ uint32_t ext_error_code;
+ uint64_t status0;
+
+ status0 = bank->regs[ACA_REG_IDX__STATUS];
+ if (!ACA_REG_STATUS_VAL(status0))
+ return 0;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0);
+
+ if (aca_check_umc_de(ras_core, status0))
+ ecc->de_count = 1;
+ else if (aca_check_umc_ue(ras_core, status0))
+ ecc->ue_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ else if (aca_check_umc_ce(ras_core, status0))
+ ecc->ce_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+
+ return 0;
+}
+
+static bool aca_check_bank_is_de(struct ras_core_context *ras_core,
+ uint64_t status)
+{
+ return (ACA_REG_STATUS_POISON(status) ||
+ ACA_REG_STATUS_DEFERRED(status));
+}
+
+static int aca_parse_bank_default(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 misc0 = bank->regs[ACA_REG_IDX__MISC0];
+ u64 status = bank->regs[ACA_REG_IDX__STATUS];
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = status;
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ if (aca_check_bank_is_de(ras_core, status)) {
+ ecc->de_count = 1;
+ } else {
+ if (bank->ecc_type == RAS_ERR_TYPE__UE)
+ ecc->ue_count = 1;
+ else if (bank->ecc_type == RAS_ERR_TYPE__CE)
+ ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static int aca_parse_xgmi_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 status, count;
+ int ext_error_code;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ status = bank->regs[ACA_REG_IDX__STATUS];
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status);
+
+ count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ if (bank->ecc_type == RAS_ERR_TYPE__UE) {
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+ ecc->ue_count = count;
+ } else if (bank->ecc_type == RAS_ERR_TYPE__CE) {
+ count = ext_error_code == 6 ? count : 0ULL;
+ ecc->ce_count = count;
+ }
+
+ return 0;
+}
+
+static const struct aca_block_info aca_v1_0_umc = {
+ .name = "umc",
+ .ras_block_id = RAS_BLOCK_ID__UMC,
+ .hwip = ACA_ECC_HWIP__UMC,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_umc_bank,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_gfx = {
+ .name = "gfx",
+ .ras_block_id = RAS_BLOCK_ID__GFX,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_gfx_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_sdma = {
+ .name = "sdma",
+ .ras_block_id = RAS_BLOCK_ID__SDMA,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_sdma_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_mmhub = {
+ .name = "mmhub",
+ .ras_block_id = RAS_BLOCK_ID__MMHUB,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_mmhub_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_xgmi = {
+ .name = "xgmi",
+ .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL,
+ .hwip = ACA_ECC_HWIP__PCS_XGMI,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_xgmi_bank,
+ },
+};
+
+static const struct aca_block_info *aca_block_info_v1_0[] = {
+ &aca_v1_0_umc,
+ &aca_v1_0_gfx,
+ &aca_v1_0_sdma,
+ &aca_v1_0_mmhub,
+ &aca_v1_0_xgmi,
+};
+
+const struct ras_aca_ip_func ras_aca_func_v1_0 = {
+ .block_num = ARRAY_SIZE(aca_block_info_v1_0),
+ .block_info = aca_block_info_v1_0,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
new file mode 100644
index 000000000000..40e5d94b037f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_V1_0_H__
+#define __RAS_ACA_V1_0_H__
+#include "ras.h"
+
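+/* ACA__REG__FIELD() extracts bit field [h:l] from a 64-bit ACA register value */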
+#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62)
+#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61)
+#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60)
+#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59)
+#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58)
+#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57)
+#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56)
+#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55)
+#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53)
+#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46)
+#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45)
+#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44)
+#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43)
+#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40)
+#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32)
+#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24)
+#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16)
+#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0)
+
+#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48)
+#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44)
+#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32)
+#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0)
+
+#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48)
+#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32)
+
+#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the SMU header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+
+extern const struct ras_aca_ip_func ras_aca_func_v1_0;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
new file mode 100644
index 000000000000..94e6d7420d94
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_cmd.h"
+
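+/* Command interface version: the major number is packed into the upper bits, the minor number into the low 10 bits */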
+#define RAS_CMD_MAJOR_VERSION 6
+#define RAS_CMD_MINOR_VERSION 0
+#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION))
+
+static int ras_cmd_add_device(struct ras_core_context *ras_core)
+{
+ INIT_LIST_HEAD(&ras_core->ras_cmd.head);
+ ras_core->ras_cmd.ras_core = ras_core;
+ ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC;
+ return 0;
+}
+
+static int ras_cmd_remove_device(struct ras_core_context *ras_core)
+{
+ memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd));
+ return 0;
+}
+
+static int ras_get_block_ecc_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_block_ecc_info_req *input_data =
+ (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_block_ecc_info_rsp *output_data =
+ (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
+ struct ras_ecc_count err_data;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ memset(&err_data, 0, sizeof(err_data));
+ ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->ce_count = err_data.total_ce_count;
+ output_data->ue_count = err_data.total_ue_count;
+ output_data->de_count = err_data.total_de_count;
+
+ cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record,
+ struct eeprom_umc_record *record)
+{
+ ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn;
+ ras_cmd_record->ts = record->ts;
+ ras_cmd_record->err_type = record->err_type;
+ ras_cmd_record->mem_channel = record->mem_channel;
+ ras_cmd_record->mcumc_id = record->mcumc_id;
+ ras_cmd_record->address = record->address;
+ ras_cmd_record->bank = record->bank;
+ ras_cmd_record->valid = 1;
+}
+
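+/*
+ * Bad page records are reported in fixed-size groups of
+ * RAS_CMD_MAX_BAD_PAGES_PER_GROUP entries; callers walk the groups by
+ * incrementing group_index until bp_total_cnt records have been fetched.
+ */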
+static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core,
+ uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data)
+{
+ struct eeprom_umc_record record;
+ struct ras_cmd_bad_page_record *ras_cmd_record;
+ uint32_t i = 0, bp_cnt = 0, group_cnt = 0;
+
+ output_data->bp_in_group = 0;
+ output_data->group_index = 0;
+
+ bp_cnt = ras_umc_get_badpage_count(ras_core);
+ if (bp_cnt) {
+ output_data->group_index = group_index;
+ group_cnt = bp_cnt / RAS_CMD_MAX_BAD_PAGES_PER_GROUP
+ + ((bp_cnt % RAS_CMD_MAX_BAD_PAGES_PER_GROUP) ? 1 : 0);
+
+ if (group_index >= group_cnt)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (;
+ i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ i++) {
+ if (ras_umc_get_badpage_record(ras_core, i, &record))
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+
+ memset(ras_cmd_record, 0, sizeof(*ras_cmd_record));
+ ras_cmd_update_bad_page_info(ras_cmd_record, &record);
+ output_data->bp_in_group++;
+ }
+ }
+ output_data->bp_total_cnt = bp_cnt;
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_bad_pages_info_req *input_data =
+ (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_bad_pages_info_rsp *output_data =
+ (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_eeprom_reset_table(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clean_badpage_data(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_aca_clear_all_blocks_ecc_count(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clear_logged_ecc(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_snapshot_rsp *output_data =
+ (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ output_data->total_cper_num = overview.logged_batch_count;
+ output_data->start_cper_id = overview.first_batch_id;
+ output_data->latest_cper_id = overview.last_batch_id;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
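+/*
+ * Build CPER records for up to req->cper_num log batches in a kernel bounce
+ * buffer and copy the result to the user buffer at req->buf_ptr. -ENOMEM
+ * from the generator means the user buffer is full; the number of batches
+ * that did not fit is reported back in remain_num.
+ */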
+static int ras_cmd_get_cper_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_record_req *req =
+ (struct ras_cmd_cper_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_cper_record_rsp *rsp =
+ (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_batch_overview overview;
+ uint32_t offset = 0, real_data_len = 0;
+ uint64_t batch_id;
+ uint8_t *buffer;
+ int ret = 0, i, count;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (!req->buf_size || !req->buf_ptr || !req->cper_num)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ buffer = kzalloc(req->buf_size, GFP_KERNEL);
+ if (!buffer)
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ for (i = 0; i < req->cper_num; i++) {
+ batch_id = req->cper_start_id + i;
+ if (batch_id >= overview.last_batch_id)
+ break;
+
+ count = ras_log_ring_get_batch_records(ras_core, batch_id, trace,
+ ARRAY_SIZE(trace));
+ if (count > 0) {
+ ret = ras_cper_generate_cper(ras_core, trace, count,
+ &buffer[offset], req->buf_size - offset, &real_data_len);
+ if (ret)
+ break;
+
+ offset += real_data_len;
+ }
+ }
+
+ if ((ret && (ret != -ENOMEM)) ||
+ copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) {
+ kfree(buffer);
+ return RAS_CMD__ERROR_GENERIC;
+ }
+
+ rsp->real_data_size = offset;
+ rsp->real_cper_num = i;
+ rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp);
+
+ kfree(buffer);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_snapshot_rsp *rsp =
+ (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ rsp->total_batch_num = overview.logged_batch_count;
+ rsp->start_batch_id = overview.first_batch_id;
+ rsp->latest_batch_id = overview.last_batch_id;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
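+/*
+ * Flatten the traces of up to batch_num consecutive log batches into the
+ * response: records[] holds the raw traces back to back, while batchs[]
+ * stores each batch id together with its offset and trace count.
+ */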
+static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_record_req *input_data =
+ (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_batch_trace_record_rsp *output_data =
+ (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+ struct ras_log_info *trace_arry[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_info *record;
+ int i, j, count = 0, offset = 0;
+ uint64_t id;
+ bool completed = false;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ if ((input_data->start_batch_id < overview.first_batch_id) ||
+ (input_data->start_batch_id >= overview.last_batch_id))
+		return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ for (i = 0; i < input_data->batch_num; i++) {
+ id = input_data->start_batch_id + i;
+ if (id >= overview.last_batch_id) {
+ completed = true;
+ break;
+ }
+
+ count = ras_log_ring_get_batch_records(ras_core,
+ id, trace_arry, ARRAY_SIZE(trace_arry));
+ if (count > 0) {
+ if ((offset + count) > RAS_CMD_MAX_TRACE_NUM)
+ break;
+ for (j = 0; j < count; j++) {
+ record = &output_data->records[offset + j];
+ record->seqno = trace_arry[j]->seqno;
+ record->timestamp = trace_arry[j]->timestamp;
+ record->event = trace_arry[j]->event;
+ memcpy(&record->aca_reg,
+ &trace_arry[j]->aca_reg, sizeof(trace_arry[j]->aca_reg));
+ }
+ } else {
+ count = 0;
+ }
+
+ output_data->batchs[i].batch_id = id;
+ output_data->batchs[i].offset = offset;
+ output_data->batchs[i].trace_num = count;
+ offset += count;
+ }
+
+ output_data->start_batch_id = input_data->start_batch_id;
+ output_data->real_batch_num = i;
+ output_data->remain_num = completed ? 0 : (input_data->batch_num - i);
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block)
+{
+ switch (block) {
+ case RAS_BLOCK_ID__UMC:
+ return RAS_TA_BLOCK__UMC;
+ case RAS_BLOCK_ID__SDMA:
+ return RAS_TA_BLOCK__SDMA;
+ case RAS_BLOCK_ID__GFX:
+ return RAS_TA_BLOCK__GFX;
+ case RAS_BLOCK_ID__MMHUB:
+ return RAS_TA_BLOCK__MMHUB;
+ case RAS_BLOCK_ID__ATHUB:
+ return RAS_TA_BLOCK__ATHUB;
+ case RAS_BLOCK_ID__PCIE_BIF:
+ return RAS_TA_BLOCK__PCIE_BIF;
+ case RAS_BLOCK_ID__HDP:
+ return RAS_TA_BLOCK__HDP;
+ case RAS_BLOCK_ID__XGMI_WAFL:
+ return RAS_TA_BLOCK__XGMI_WAFL;
+ case RAS_BLOCK_ID__DF:
+ return RAS_TA_BLOCK__DF;
+ case RAS_BLOCK_ID__SMN:
+ return RAS_TA_BLOCK__SMN;
+ case RAS_BLOCK_ID__SEM:
+ return RAS_TA_BLOCK__SEM;
+ case RAS_BLOCK_ID__MP0:
+ return RAS_TA_BLOCK__MP0;
+ case RAS_BLOCK_ID__MP1:
+ return RAS_TA_BLOCK__MP1;
+ case RAS_BLOCK_ID__FUSE:
+ return RAS_TA_BLOCK__FUSE;
+ case RAS_BLOCK_ID__MCA:
+ return RAS_TA_BLOCK__MCA;
+ case RAS_BLOCK_ID__VCN:
+ return RAS_TA_BLOCK__VCN;
+ case RAS_BLOCK_ID__JPEG:
+ return RAS_TA_BLOCK__JPEG;
+ default:
+ return RAS_TA_BLOCK__UMC;
+ }
+}
+
+static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error)
+{
+ switch (error) {
+ case RAS_ECC_ERR__NONE:
+ return RAS_TA_ERROR__NONE;
+ case RAS_ECC_ERR__PARITY:
+ return RAS_TA_ERROR__PARITY;
+ case RAS_ECC_ERR__SINGLE_CORRECTABLE:
+ return RAS_TA_ERROR__SINGLE_CORRECTABLE;
+ case RAS_ECC_ERR__MULTI_UNCORRECTABLE:
+ return RAS_TA_ERROR__MULTI_UNCORRECTABLE;
+ case RAS_ECC_ERR__POISON:
+ return RAS_TA_ERROR__POISON;
+ default:
+ return RAS_TA_ERROR__NONE;
+ }
+}
+
+static int ras_cmd_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ struct ras_cmd_inject_error_rsp *output_data =
+ (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw;
+ int ret = 0;
+ struct ras_ta_trigger_error_input block_info = {
+ .block_id = __get_ras_ta_block(req->block_id),
+ .sub_block_index = req->subblock_id,
+ .inject_error_type = __get_ras_ta_err_type(req->error_type),
+ .address = req->address,
+ .value = req->method,
+ };
+
+ ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask);
+ if (!ret) {
+ output_data->version = 0;
+ output_data->address = block_info.address;
+ cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static struct ras_cmd_func_map ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error},
+ {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info},
+ {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages},
+ {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info},
+ {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts},
+ {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot},
+ {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records},
+ {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot},
+ {RAS_CMD__GET_BATCH_TRACE_RECORD, ras_cmd_get_batch_trace_records},
+};
+
+int rascore_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) {
+ if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (!ras_cmd)
+ return RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return ras_cmd->func(ras_core, cmd, data);
+}
+
+int ras_cmd_init(struct ras_core_context *ras_core)
+{
+ return ras_cmd_add_device(ras_core);
+}
+
+int ras_cmd_fini(struct ras_core_context *ras_core)
+{
+ ras_cmd_remove_device(ras_core);
+ return 0;
+}
+
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp)
+{
+ rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION;
+ rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION;
+
+ return 0;
+}
+
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr)
+{
+ struct umc_bank_addr umc_bank = {0};
+ int ret;
+
+ ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ bank_addr->stack_id = umc_bank.stack_id;
+ bank_addr->bank_group = umc_bank.bank_group;
+ bank_addr->bank = umc_bank.bank;
+ bank_addr->row = umc_bank.row;
+ bank_addr->column = umc_bank.column;
+ bank_addr->channel = umc_bank.channel;
+ bank_addr->subchannel = umc_bank.subchannel;
+
+ return 0;
+}
+
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa)
+{
+ struct umc_bank_addr umc_bank = {0};
+
+ umc_bank.stack_id = bank_addr.stack_id;
+ umc_bank.bank_group = bank_addr.bank_group;
+ umc_bank.bank = bank_addr.bank;
+ umc_bank.row = bank_addr.row;
+ umc_bank.column = bank_addr.column;
+ umc_bank.channel = bank_addr.channel;
+ umc_bank.subchannel = bank_addr.subchannel;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true);
+}
+
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_cmd.dev_handle;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
new file mode 100644
index 000000000000..48a0715eb821
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CMD_H__
+#define __RAS_CMD_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL
+
+#define RAS_CMD_MAX_IN_SIZE 256
+#define RAS_CMD_MAX_GPU_NUM 32
+#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32
+
+/* Position of the instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub block uses the lower 12 bits.
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+enum ras_cmd_interface_type {
+ RAS_CMD_INTERFACE_TYPE_NONE,
+ RAS_CMD_INTERFACE_TYPE_AMDGPU,
+ RAS_CMD_INTERFACE_TYPE_VF,
+ RAS_CMD_INTERFACE_TYPE_PF,
+};
+
+enum ras_cmd_id_range {
+ RAS_CMD_ID_COMMON_START = 0,
+ RAS_CMD_ID_COMMON_END = 0x10000,
+ RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END,
+ RAS_CMD_ID_AMDGPU_END = 0x20000,
+ RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END,
+ RAS_CMD_ID_MXGPU_END = 0x30000,
+ RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END,
+ RAS_CMD_ID_MXGPU_VF_END = 0x40000,
+};
+
+enum ras_cmd_id {
+ RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START,
+ RAS_CMD__QUERY_INTERFACE_INFO,
+ RAS_CMD__GET_DEVICES_INFO,
+ RAS_CMD__GET_BLOCK_ECC_STATUS,
+ RAS_CMD__INJECT_ERROR,
+ RAS_CMD__GET_BAD_PAGES,
+ RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ RAS_CMD__RESET_ALL_ERROR_COUNTS,
+ RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
+ RAS_CMD__TRANSLATE_FB_ADDRESS,
+ RAS_CMD__GET_LINK_TOPOLOGY,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ RAS_CMD__GET_CPER_RECORD,
+ RAS_CMD__GET_BATCH_TRACE_SNAPSHOT,
+ RAS_CMD__GET_BATCH_TRACE_RECORD,
+ RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END,
+};
+
+enum ras_cmd_response {
+ RAS_CMD__SUCCESS = 0,
+ RAS_CMD__SUCCESS_EXEED_BUFFER,
+ RAS_CMD__ERROR_UKNOWN_CMD,
+ RAS_CMD__ERROR_INVALID_CMD,
+ RAS_CMD__ERROR_VERSION,
+ RAS_CMD__ERROR_INVALID_INPUT_SIZE,
+ RAS_CMD__ERROR_INVALID_INPUT_DATA,
+ RAS_CMD__ERROR_DRV_INIT_FAIL,
+ RAS_CMD__ERROR_ACCESS_DENIED,
+ RAS_CMD__ERROR_GENERIC,
+ RAS_CMD__ERROR_TIMEOUT,
+};
+
+enum ras_error_type {
+ RAS_TYPE_ERROR__NONE = 0,
+ RAS_TYPE_ERROR__PARITY = 1,
+ RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TYPE_ERROR__POISON = 8,
+};
+
+struct ras_core_context;
+struct ras_cmd_ctx;
+
+struct ras_cmd_mgr {
+ struct list_head head;
+ struct ras_core_context *ras_core;
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_func_map {
+ uint32_t cmd_id;
+ int (*func)(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+};
+
+struct ras_device_bdf {
+ union {
+ struct {
+ uint32_t function : 3;
+ uint32_t device : 5;
+ uint32_t bus : 8;
+ uint32_t domain : 16;
+ };
+ uint32_t u32_all;
+ };
+};
+
+struct ras_cmd_param {
+ uint32_t idx_vf;
+ void *data;
+};
+
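+/*
+ * Command context shared with clients: a fixed RAS_CMD_MAX_IN_SIZE input
+ * buffer followed by a flexible output buffer. The structures below use
+ * 8-byte packing so their layout is fixed across the command interface.
+ */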
+#pragma pack(push, 8)
+struct ras_cmd_ctx {
+ uint32_t magic;
+ union {
+ struct {
+ uint16_t ras_cmd_minor_ver : 10;
+ uint16_t ras_cmd_major_ver : 6;
+ };
+ uint16_t ras_cmd_ver;
+ };
+ union {
+ struct {
+ uint16_t plat_major_ver : 10;
+ uint16_t plat_minor_ver : 6;
+ };
+ uint16_t plat_ver;
+ };
+ uint32_t cmd_id;
+ uint32_t cmd_res;
+ uint32_t input_size;
+ uint32_t output_size;
+ uint32_t output_buf_size;
+ uint32_t reserved[5];
+ uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE];
+ uint8_t output_buff_raw[];
+};
+
+struct ras_cmd_dev_handle {
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_block_ecc_info_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_block_ecc_info_rsp {
+ uint32_t version;
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t reserved[6];
+};
+
+struct ras_cmd_inject_error_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint64_t address;
+ uint32_t error_type;
+ uint32_t instance_mask;
+ union {
+ struct {
+ /* vf index */
+ uint64_t vf_idx : 6;
+ /* method of error injection. i.e persistent, coherent etc */
+ uint64_t method : 10;
+ uint64_t rsv : 48;
+ };
+ uint64_t value;
+ };
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_inject_error_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ uint64_t address;
+};
+
+struct ras_cmd_dev_info {
+ uint64_t dev_handle;
+ uint32_t location_id;
+ uint32_t ecc_enabled;
+ uint32_t ecc_supported;
+ uint32_t vf_num;
+ uint32_t asic_type;
+ uint32_t oam_id;
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_devices_info_rsp {
+ uint32_t version;
+ uint32_t dev_num;
+ uint32_t reserved[6];
+ struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM];
+};
+
+struct ras_cmd_bad_page_record {
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+ uint64_t retired_page;
+ uint64_t ts;
+
+ uint32_t err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ unsigned char valid;
+ unsigned char reserved[8];
+};
+
+struct ras_cmd_bad_pages_info_req {
+ struct ras_cmd_dev_handle device;
+ uint32_t group_index;
+ uint32_t reserved[5];
+};
+
+struct ras_cmd_bad_pages_info_rsp {
+ uint32_t version;
+ uint32_t group_index;
+ uint32_t bp_in_group;
+ uint32_t bp_total_cnt;
+ uint32_t reserved[4];
+ struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+};
+
+struct ras_query_interface_info_req {
+ uint32_t reserved[8];
+};
+
+struct ras_query_interface_info_rsp {
+ uint32_t version;
+ uint32_t ras_cmd_major_ver;
+ uint32_t ras_cmd_minor_ver;
+ uint32_t plat_major_ver;
+ uint32_t plat_minor_ver;
+ uint8_t interface_type;
+ uint8_t rsv[3];
+ uint32_t reserved[8];
+};
+
+#define RAS_MAX_NUM_SAFE_RANGES 64
+struct ras_cmd_ras_safe_fb_address_ranges_rsp {
+ uint32_t version;
+ uint32_t num_ranges;
+ uint32_t reserved[4];
+ struct {
+ uint64_t start;
+ uint64_t size;
+ uint32_t idx;
+ uint32_t reserved[3];
+ } range[RAS_MAX_NUM_SAFE_RANGES];
+};
+
+enum ras_fb_addr_type {
+ RAS_FB_ADDR_SOC_PHY, /* SPA */
+ RAS_FB_ADDR_BANK,
+ RAS_FB_ADDR_VF_PHY, /* GPA */
+ RAS_FB_ADDR_UNKNOWN
+};
+
+struct ras_fb_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+ uint32_t reserved[3];
+};
+
+struct ras_fb_vf_phy_addr {
+ uint32_t vf_idx;
+ uint32_t reserved;
+ uint64_t addr;
+};
+
+union ras_translate_fb_address {
+ struct ras_fb_bank_addr bank_addr;
+ uint64_t soc_phy_addr;
+ struct ras_fb_vf_phy_addr vf_phy_addr;
+};
+
+struct ras_cmd_translate_fb_address_req {
+ struct ras_cmd_dev_handle dev;
+ enum ras_fb_addr_type src_addr_type;
+ enum ras_fb_addr_type dest_addr_type;
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_cmd_translate_fb_address_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_dev_link_topology_req {
+ struct ras_cmd_dev_handle src;
+ struct ras_cmd_dev_handle dst;
+};
+
+struct ras_dev_link_topology_rsp {
+ uint32_t version;
+ uint32_t link_status; /* HW status of the link */
+ uint32_t link_type; /* type of the link */
+ uint32_t num_hops; /* number of hops */
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_cper_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_cper_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ uint64_t latest_cper_id;
+};
+
+struct ras_cmd_cper_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t cper_start_id;
+ uint32_t cper_num;
+ uint32_t buf_size;
+ uint64_t buf_ptr;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_cper_record_rsp {
+ uint32_t version;
+ uint32_t real_data_size;
+ uint32_t real_cper_num;
+ uint32_t remain_num;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_batch_trace_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_batch_trace_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_batch_num;
+ uint64_t start_batch_id;
+ uint64_t latest_batch_id;
+};
+
+struct ras_cmd_batch_trace_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t start_batch_id;
+ uint32_t batch_num;
+ uint32_t reserved[5];
+};
+
+struct batch_ras_trace_info {
+ uint64_t batch_id;
+ uint16_t offset;
+ uint8_t trace_num;
+ uint8_t rsv;
+ uint32_t reserved;
+};
+
+#define RAS_CMD_MAX_BATCH_NUM 300
+#define RAS_CMD_MAX_TRACE_NUM 300
+struct ras_cmd_batch_trace_record_rsp {
+ uint32_t version;
+ uint16_t real_batch_num;
+ uint16_t remain_num;
+ uint64_t start_batch_id;
+ uint32_t reserved[2];
+ struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM];
+ struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM];
+};
+
+#pragma pack(pop)
+
+int ras_cmd_init(struct ras_core_context *ras_core);
+int ras_cmd_fini(struct ras_core_context *ras_core);
+int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data);
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core);
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp);
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr);
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
new file mode 100644
index 000000000000..01122b55c98a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+
+#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t))
+
+#define IS_LEAP_YEAR(x) (((x) % 4 == 0 && (x) % 100 != 0) || (x) % 400 == 0)
+
+static const char * const ras_block_name[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+ "mca",
+ "vcn",
+ "jpeg",
+ "ih",
+ "mpio",
+};
+
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id)
+{
+ if (block_id >= ARRAY_SIZE(ras_block_name))
+ return "";
+
+ return ras_block_name[block_id];
+}
+
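+/*
+ * Convert a UTC timestamp in seconds since the Unix epoch into calendar
+ * fields by stripping whole days first and then counting years and months,
+ * taking leap years into account.
+ */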
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm)
+{
+ int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+ uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0;
+ uint32_t year = 0;
+ int seconds_per_day = 24 * 60 * 60;
+ int seconds_per_hour = 60 * 60;
+ int seconds_per_minute = 60;
+	uint64_t remaining;
+	int days, remaining_seconds;
+
+	/* div64_u64_rem() writes a 64-bit remainder; do not alias an int */
+	days = div64_u64_rem(timestamp, seconds_per_day, &remaining);
+	remaining_seconds = remaining;
+
+ /* utc_timestamp follows the Unix epoch */
+ year = 1970;
+ while (days >= 365) {
+ if (IS_LEAP_YEAR(year)) {
+ if (days < 366)
+ break;
+ days -= 366;
+ } else {
+ days -= 365;
+ }
+ year++;
+ }
+
+ days_in_month[1] += IS_LEAP_YEAR(year);
+
+ month = 0;
+ while (days >= days_in_month[month]) {
+ days -= days_in_month[month];
+ month++;
+ }
+ month++;
+ day = days + 1;
+
+ if (remaining_seconds) {
+ hour = remaining_seconds / seconds_per_hour;
+ minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute;
+ second = remaining_seconds % seconds_per_minute;
+ }
+
+ tm->tm_year = year;
+ tm->tm_mon = month;
+ tm->tm_mday = day;
+ tm->tm_hour = hour;
+ tm->tm_min = minute;
+ tm->tm_sec = second;
+
+ return 0;
+}
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IN_RESET) ? true : false;
+}
+
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IS_VF) ? true : false;
+}
+
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return false;
+
+ return ras_core->is_rma;
+}
+
+static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t seqno)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo)
+ ret = kfifo_in_spinlocked(seqno_fifo,
+ &seqno, sizeof(seqno), &ras_core->seqno_lock);
+
+ return ret ? 0 : -EINVAL;
+}
+
+static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo) {
+ if (pop)
+ ret = kfifo_out_spinlocked(seqno_fifo,
+ seqno, sizeof(*seqno), &ras_core->seqno_lock);
+ else
+ ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno));
+ }
+
+ return ret ? 0 : -EINVAL;
+}
+
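+/*
+ * Sequence numbers are either produced by the platform callback
+ * (ras_core_gen_seqno) or replayed from the per-type kfifos filled by
+ * ras_core_put_seqno(); ras_core_get_seqno() prefers a queued seqno and
+ * falls back to generating a fresh one when the fifo is empty.
+ */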
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type type)
+{
+ uint64_t seqno = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->gen_seqno)
+ ras_core->sys_fn->gen_seqno(ras_core, type, &seqno);
+
+ return seqno;
+}
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno)
+{
+ int ret = 0;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return -EINVAL;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CREATION, seqno);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, seqno);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop)
+{
+ uint64_t seq_no;
+ int ret = -ENODATA;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return 0;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CREATION, &seq_no, pop);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop);
+
+ if (ret)
+ seq_no = ras_core_gen_seqno(ras_core, seqno_type);
+
+ return seq_no;
+}
+
+static int ras_core_eeprom_recovery(struct ras_core_context *ras_core)
+{
+ int count;
+ int ret;
+
+ count = ras_eeprom_get_record_count(ras_core);
+ if (!count)
+ return 0;
+
+	/* Avoid bad pages being loaded again after gpu reset */
+ if (ras_umc_get_saved_eeprom_count(ras_core) >= count)
+ return 0;
+
+ ret = ras_umc_load_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret);
+ return ret;
+ }
+
+ ras_eeprom_sync_info(ras_core);
+
+ return ret;
+}
+
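+/*
+ * ras_core_create() only allocates the context and snapshots the caller's
+ * configuration; the actual setup happens in ras_core_sw_init() and
+ * ras_core_hw_init().
+ */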
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
+{
+ struct ras_core_context *ras_core;
+ struct ras_core_config *config;
+
+ ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL);
+ if (!ras_core)
+ return NULL;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ kfree(ras_core);
+ return NULL;
+ }
+
+ memcpy(config, init_config, sizeof(*config));
+ ras_core->config = config;
+
+ return ras_core;
+}
+
+void ras_core_destroy(struct ras_core_context *ras_core)
+{
+ if (ras_core)
+ kfree(ras_core->config);
+
+ kfree(ras_core);
+}
+
+int ras_core_sw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ if (!ras_core->config) {
+ RAS_DEV_ERR(ras_core->dev, "No ras core config!\n");
+ return -EINVAL;
+ }
+
+ ras_core->sys_fn = ras_core->config->sys_fn;
+ if (!ras_core->sys_fn)
+ return -EINVAL;
+
+ ret = kfifo_alloc(&ras_core->de_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = kfifo_alloc(&ras_core->consumption_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_core->seqno_lock);
+
+ ret = ras_aca_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_umc_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_cmd_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_log_ring_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_psp_sw_init(ras_core);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ras_core_sw_fini(struct ras_core_context *ras_core)
+{
+ kfifo_free(&ras_core->de_seqno_fifo);
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+
+ ras_psp_sw_fini(ras_core);
+ ras_log_ring_sw_fini(ras_core);
+ ras_cmd_fini(ras_core);
+ ras_umc_sw_fini(ras_core);
+ ras_aca_sw_fini(ras_core);
+
+ return 0;
+}
+
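+/*
+ * Bring the RAS sub-blocks up in dependency order (psp, aca, mp1, nbio,
+ * umc, gfx, eeprom); any failure unwinds the stages that were already
+ * initialized in reverse order.
+ */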
+int ras_core_hw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ras_core->ras_eeprom_supported =
+ ras_core->config->ras_eeprom_supported;
+
+ ras_core->poison_supported = ras_core->config->poison_supported;
+
+ ret = ras_psp_hw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_aca_hw_init(ras_core);
+ if (ret)
+ goto init_err1;
+
+ ret = ras_mp1_hw_init(ras_core);
+ if (ret)
+ goto init_err2;
+
+ ret = ras_nbio_hw_init(ras_core);
+ if (ret)
+ goto init_err3;
+
+ ret = ras_umc_hw_init(ras_core);
+ if (ret)
+ goto init_err4;
+
+ ret = ras_gfx_hw_init(ras_core);
+ if (ret)
+ goto init_err5;
+
+ ret = ras_eeprom_hw_init(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_core_eeprom_recovery(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+			"Failed to recover ras core, ret:%d\n", ret);
+ goto init_err6;
+ }
+
+ ret = ras_eeprom_check_storage_status(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_process_init(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ras_core->is_initialized = true;
+
+ return 0;
+
+init_err7:
+ ras_eeprom_hw_fini(ras_core);
+init_err6:
+ ras_gfx_hw_fini(ras_core);
+init_err5:
+ ras_umc_hw_fini(ras_core);
+init_err4:
+ ras_nbio_hw_fini(ras_core);
+init_err3:
+ ras_mp1_hw_fini(ras_core);
+init_err2:
+ ras_aca_hw_fini(ras_core);
+init_err1:
+ ras_psp_hw_fini(ras_core);
+ return ret;
+}
+
+int ras_core_hw_fini(struct ras_core_context *ras_core)
+{
+ ras_core->is_initialized = false;
+
+ ras_process_fini(ras_core);
+ ras_eeprom_hw_fini(ras_core);
+ ras_gfx_hw_fini(ras_core);
+ ras_nbio_hw_fini(ras_core);
+ ras_umc_hw_fini(ras_core);
+ ras_mp1_hw_fini(ras_core);
+ ras_aca_hw_fini(ras_core);
+ ras_psp_hw_fini(ras_core);
+
+ return 0;
+}
+
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data)
+{
+ return ras_nbio_handle_irq_error(ras_core, data);
+}
+
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
+{
+ int ret = 0;
+
+ ras_aca_mark_fatal_flag(ras_core);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL);
+
+ return ret;
+}
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
+{
+ if (ras_core->ras_nbio.ip_func &&
+ ras_core->ras_nbio.ip_func->get_memory_partition_mode)
+ return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n");
+ return 0;
+}
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL);
+ if (!ret)
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
+
+ return ret;
+}
+
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count)
+{
+ int ret;
+
+ if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core)
+ return -EINVAL;
+
+ ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
+ if (!ret)
+ ras_aca_clear_block_new_ecc_count(ras_core, block);
+
+ return ret;
+}
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
+{
+ ras_core->ras_core_enabled = enable;
+
+ return 0;
+}
+
+bool ras_core_is_enabled(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_core_enabled;
+}
+
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_utc_second_timestamp)
+ return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
+ return 0;
+}
+
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ if (!ras_core || !soc_pa || !bank_addr)
+ return -EINVAL;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
+}
+
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->detect_ras_interrupt)
+ return ras_core->sys_fn->detect_ras_interrupt(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
+ return false;
+}
+
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
+ return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
+
+	RAS_DEV_ERR(ras_core->dev, "Get gpu memory API is not configured!\n");
+ return -EACCES;
+}
+
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
+ return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
+
+	RAS_DEV_ERR(ras_core->dev, "Put gpu memory API is not configured!\n");
+ return -EACCES;
+}
+
+bool ras_core_is_ready(struct ras_core_context *ras_core)
+{
+ return ras_core ? ras_core->is_initialized : false;
+}
+
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ return ras_eeprom_check_safety_watermark(ras_core);
+}
+
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true);
+
+ return 1;
+}
+
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, true, false);
+}
+
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, false, false);
+}
+
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->ras_notifier)
+ return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
+
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_device_system_info)
+ return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
new file mode 100644
index 000000000000..144fbe4ceb9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CORE_STATUS_H__
+#define __RAS_CORE_STATUS_H__
+
+#define RAS_CORE_OK 0
+#define RAS_CORE_NOT_SUPPORTED 248
+#define RAS_CORE_FAIL_ERROR_QUERY 249
+#define RAS_CORE_FAIL_ERROR_INJECTION 250
+#define RAS_CORE_FAIL_FATAL_RECOVERY 251
+#define RAS_CORE_FAIL_POISON_CONSUMPTION 252
+#define RAS_CORE_FAIL_POISON_CREATION 253
+#define RAS_CORE_FAIL_NO_VALID_BANKS 254
+#define RAS_CORE_GPU_IN_MODE1_RESET 255
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
new file mode 100644
index 000000000000..0fc7522b7ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE;
+static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC;
+static const struct ras_cper_guid BOOT = BOOT__TYPE;
+
+static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP;
+static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR;
+
+static void cper_get_timestamp(struct ras_core_context *ras_core,
+ struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp)
+{
+ struct ras_time tm = {0};
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = tm.tm_mon;
+ timestamp->year = tm.tm_year % 100;
+ timestamp->century = tm.tm_year / 100;
+}
+
+static void fill_section_hdr(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, enum ras_cper_type type,
+ enum ras_cper_severity sev, struct ras_log_info *trace)
+{
+ struct device_system_info dev_info = {0};
+ char record_id[32];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR__REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp);
+
+ snprintf(record_id, sizeof(record_id), "%d:%llX", dev_info.socket_id,
+ RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ dev_info.vendor_id, dev_info.device_id);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU);
+
+ switch (type) {
+ case RAS_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ case RAS_CPER_TYPE_RMA:
+ hdr->notify_type = MCE;
+ break;
+ case RAS_CPER_TYPE_RUNTIME:
+ if (sev == RAS_CPER_SEV_NON_FATAL_CE)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n");
+ break;
+ }
+}
+
+static int fill_section_descriptor(struct ras_core_context *ras_core,
+ struct cper_section_descriptor *descriptor,
+ enum ras_cper_severity sev,
+ struct ras_cper_guid sec_type,
+ uint32_t section_offset,
+ uint32_t section_length)
+{
+ struct device_system_info dev_info = {0};
+
+ descriptor->revision_minor = CPER_SEC__MINOR_REV_1;
+ descriptor->revision_major = CPER_SEC__MAJOR_REV_22;
+ descriptor->sec_offset = section_offset;
+ descriptor->sec_length = section_length;
+ descriptor->valid_bits.fru_text = 1;
+ descriptor->flag_bits.primary = 1;
+ descriptor->severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+ descriptor->sec_type = sec_type;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id);
+
+ if (sev == RAS_CPER_SEV_RMA)
+ descriptor->flag_bits.exceed_err_threshold = 1;
+
+ if (sev == RAS_CPER_SEV_NON_FATAL_UE)
+ descriptor->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+static int fill_section_fatal(struct ras_core_context *ras_core,
+ struct cper_section_fatal *fatal, struct ras_log_info *trace)
+{
+ fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ fatal->data.reg_arr_size = sizeof(fatal->data.reg);
+
+ fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS];
+ fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR];
+ fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID];
+ fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND];
+
+ return 0;
+}
+
+static int fill_section_runtime(struct ras_core_context *ras_core,
+ struct cper_section_runtime *runtime, struct ras_log_info *trace,
+ enum ras_cper_severity sev)
+{
+ runtime->hdr.valid_bits.err_info_cnt = 1;
+ runtime->hdr.valid_bits.err_context_cnt = 1;
+
+ runtime->descriptor.error_type = RUNTIME;
+ runtime->descriptor.ms_chk_bits.err_type_valid = 1;
+ if (sev == RAS_CPER_SEV_RMA) {
+ runtime->descriptor.valid_bits.ms_chk = 1;
+ runtime->descriptor.ms_chk_bits.err_type = 1;
+ runtime->descriptor.ms_chk_bits.pcc = 1;
+ }
+
+ runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump);
+
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND];
+
+ return 0;
+}
+
+static int cper_generate_runtime_record(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num,
+ enum ras_cper_severity sev)
+{
+ struct cper_section_descriptor *descriptor;
+ struct cper_section_runtime *runtime;
+ int i;
+
+ fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]);
+ hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num);
+ hdr->sec_cnt = arr_num;
+ for (i = 0; i < arr_num; i++) {
+ descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr +
+ RAS_SEC_DESC_OFFSET(i));
+ runtime = (struct cper_section_runtime *)((uint8_t *)hdr +
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i));
+
+ fill_section_descriptor(ras_core, descriptor, sev, RUNTIME,
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i),
+ sizeof(struct cper_section_runtime));
+ fill_section_runtime(ras_core, runtime, trace_arr[i], sev);
+ }
+
+ return 0;
+}
+
+static int cper_generate_fatal_record(struct ras_core_context *ras_core,
+ uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num)
+{
+ struct ras_cper_fatal_record record = {0};
+ int i = 0;
+
+ for (i = 0; i < arr_num; i++) {
+ fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL,
+ RAS_CPER_SEV_FATAL_UE, trace_arr[i]);
+ record.hdr.record_length = RAS_HDR_LEN + RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN;
+ record.hdr.sec_cnt = 1;
+
+ fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE,
+ CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal),
+ sizeof(struct cper_section_fatal));
+
+ fill_section_fatal(ras_core, &record.fatal, trace_arr[i]);
+
+ memcpy(buffer + (i * record.hdr.record_length),
+ &record, record.hdr.record_length);
+ }
+
+ return 0;
+}
+
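+/*
+ * A runtime record is one header plus a descriptor and a non-standard
+ * section per trace, while fatal errors are emitted as one complete CPER
+ * record per trace, which is why the extra headers are added here.
+ */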
+static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count)
+{
+ int size = 0;
+
+ size += RAS_HDR_LEN;
+ size += (RAS_SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case RAS_CPER_TYPE_RUNTIME:
+ case RAS_CPER_TYPE_RMA:
+ size += (RAS_NONSTD_SEC_LEN * section_count);
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ size += (RAS_FATAL_SEC_LEN * section_count);
+ size += (RAS_HDR_LEN * (section_count - 1));
+ break;
+ case RAS_CPER_TYPE_BOOT:
+ size += (RAS_BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ /* should never reach here */
+ break;
+ }
+
+ return size;
+}
+
+static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event)
+{
+ switch (event) {
+ case RAS_LOG_EVENT_UE:
+ return RAS_CPER_TYPE_FATAL;
+ case RAS_LOG_EVENT_DE:
+ case RAS_LOG_EVENT_CE:
+ case RAS_LOG_EVENT_POISON_CREATION:
+ case RAS_LOG_EVENT_POISON_CONSUMPTION:
+ return RAS_CPER_TYPE_RUNTIME;
+ case RAS_LOG_EVENT_RMA:
+ return RAS_CPER_TYPE_RMA;
+ default:
+ /* should never reach here */
+ return RAS_CPER_TYPE_RUNTIME;
+ }
+}
+
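+/*
+ * Serialize one batch of log traces into CPER format at *buf. All traces in
+ * a batch share the same event, which selects the record layout and
+ * severity; -ENOMEM is returned when the record would not fit in buf_len.
+ */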
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len)
+{
+ uint8_t *buffer = buf;
+ uint64_t buf_size = buf_len;
+ int record_size, saved_size = 0;
+ struct cper_section_hdr *hdr;
+
+ /* All the batch traces share the same event */
+ record_size = cper_get_record_size(
+ cper_ras_log_event_to_cper_type(trace_list[0]->event), count);
+
+ if ((record_size + saved_size) > buf_size)
+ return -ENOMEM;
+
+ hdr = (struct cper_section_hdr *)(buffer + saved_size);
+
+ switch (trace_list[0]->event) {
+ case RAS_LOG_EVENT_RMA:
+ cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA);
+ break;
+ case RAS_LOG_EVENT_DE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE);
+ break;
+ case RAS_LOG_EVENT_CE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE);
+ break;
+ case RAS_LOG_EVENT_UE:
+ cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event);
+ break;
+ }
+
+ saved_size += record_size;
+
+ *real_data_len = saved_size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
new file mode 100644
index 000000000000..076c1883c1ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_CPER_H__
+#define __RAS_CPER_H__
+
+#define CPER_UUID_MAX_SIZE 16
+struct ras_cper_guid {
+ uint8_t b[CPER_UUID_MAX_SIZE];
+};
+
+#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ ((struct ras_cper_guid) \
+ {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+#define CPER_HDR__REV_1 (0x100)
+#define CPER_SEC__MINOR_REV_1 (0x01)
+#define CPER_SEC__MAJOR_REV_22 (0x22)
+#define CPER_OAM_MAX_COUNT (8)
+
+#define CPER_CTX_TYPE__CRASH (1)
+#define CPER_CTX_TYPE__BOOT (9)
+
+#define CPER_CREATOR_ID__AMDGPU "amdgpu"
+
+#define CPER_NOTIFY__MCE \
+ CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY__CMC \
+ CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT__TYPE \
+ CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define GPU__CRASHDUMP \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define GPU__NONSTANDARD_ERROR \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR__SECTION_TYPE \
+ CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum ras_cper_type {
+ RAS_CPER_TYPE_RUNTIME,
+ RAS_CPER_TYPE_FATAL,
+ RAS_CPER_TYPE_BOOT,
+ RAS_CPER_TYPE_RMA,
+};
+
+enum ras_cper_severity {
+ RAS_CPER_SEV_NON_FATAL_UE = 0,
+ RAS_CPER_SEV_FATAL_UE = 1,
+ RAS_CPER_SEV_NON_FATAL_CE = 2,
+ RAS_CPER_SEV_RMA = 3,
+
+ RAS_CPER_SEV_UNUSED = 10,
+};
+
+enum ras_cper_aca_reg {
+ RAS_CPER_ACA_REG_CTL = 0,
+ RAS_CPER_ACA_REG_STATUS = 1,
+ RAS_CPER_ACA_REG_ADDR = 2,
+ RAS_CPER_ACA_REG_MISC0 = 3,
+ RAS_CPER_ACA_REG_CONFIG = 4,
+ RAS_CPER_ACA_REG_IPID = 5,
+ RAS_CPER_ACA_REG_SYND = 6,
+ RAS_CPER_ACA_REG_DESTAT = 8,
+ RAS_CPER_ACA_REG_DEADDR = 9,
+ RAS_CPER_ACA_REG_MASK = 10,
+
+ RAS_CPER_ACA_REG_COUNT = 16,
+};
+
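+/*
+ * On-the-wire CPER record layout; the structures are byte packed so they
+ * can be copied out verbatim as a Common Platform Error Record stream.
+ */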
+#pragma pack(push, 1)
+
+struct ras_cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_section_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum ras_cper_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct ras_cper_timestamp timestamp;
+ char platform_id[16];
+ struct ras_cper_guid partition_id; /* Reserved */
+ char creator_id[16];
+ struct ras_cper_guid notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_section_descriptor {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ struct ras_cper_guid sec_type;
+ char fru_id[16];
+ enum ras_cper_severity severity;
+ char fru_text[20];
+};
+
+struct runtime_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct runtime_descriptor {
+ struct ras_cper_guid error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct runtime_error_reg {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT];
+};
+
+struct cper_section_runtime {
+ struct runtime_hdr hdr;
+ struct runtime_descriptor descriptor;
+ struct runtime_error_reg reg;
+};
+
+struct crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct fatal_reg_info {
+ uint64_t status;
+ uint64_t addr;
+ uint64_t ipid;
+ uint64_t synd;
+};
+
+struct crashdump_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct fatal_reg_info reg;
+};
+
+struct crashdump_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_OAM_MAX_COUNT];
+};
+
+struct cper_section_fatal {
+ struct crashdump_hdr hdr;
+ struct crashdump_fatal data;
+};
+
+struct cper_section_boot {
+ struct crashdump_hdr hdr;
+ struct crashdump_boot data;
+};
+
+struct ras_cper_fatal_record {
+ struct cper_section_hdr hdr;
+ struct cper_section_descriptor descriptor;
+ struct cper_section_fatal fatal;
+};
+#pragma pack(pop)
+
+#define RAS_HDR_LEN (sizeof(struct cper_section_hdr))
+#define RAS_SEC_DESC_LEN (sizeof(struct cper_section_descriptor))
+
+#define RAS_BOOT_SEC_LEN (sizeof(struct cper_section_boot))
+#define RAS_FATAL_SEC_LEN (sizeof(struct cper_section_fatal))
+#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_section_runtime))
+
+#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * idx))
+
+#define RAS_BOOT_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_BOOT_SEC_LEN * idx))
+#define RAS_FATAL_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_FATAL_SEC_LEN * idx))
+#define RAS_NONSTD_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_NONSTD_SEC_LEN * idx))
+
+struct ras_core_context;
+struct ras_log_info;
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
new file mode 100644
index 000000000000..cd6b057bdaf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras_eeprom.h"
+#include "ras.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s) and on the status
+ * of pins 1-3, bits 18:16 select either memory within a single device
+ * or one of several devices on the I2C bus.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
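+
+/* For example, MAKE_I2C_ADDR(0x0) yields 0x50 and MAKE_I2C_ADDR(0x40000)
+ * yields 0x54, matching the device addresses described above.
+ */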
+
+/*
+ * The two macros below give the actual size in bytes that these
+ * values occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE is different from sizeof(struct eeprom_umc_record),
+ * which uses uint64_t to store 6-byte fields such as retired_row_pfn.
+ */
+#define RAS_TABLE_HEADER_SIZE 20
+#define RAS_TABLE_RECORD_SIZE 24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL 0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD 0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* Assume a 2-Mbit (256 KiB) EEPROM and use the whole space. */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+ (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+ (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */
+#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+ (_C)->ras_max_record_count)
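+
+/* For example, with ras_fri = 5 and ras_max_record_count = 8, relative
+ * index 4 maps to absolute index (4 + 5) % 8 = 1.
+ */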
+
+#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
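+
+/* For example, a v2.1 table whose tbl_size is 20 + 256 + 24 * N bytes
+ * reports N records via RAS_NUM_RECS_V2_1().
+ */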
+
+#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom))
+
+static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_eeprom_supported;
+}
+
+static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core,
+ struct ras_eeprom_control *control)
+{
+ int ret = -EINVAL;
+
+ if (control->sys_func &&
+ control->sys_func->update_eeprom_i2c_config)
+ ret = control->sys_func->update_eeprom_i2c_config(ras_core);
+ else
+ RAS_DEV_WARN(ras_core->dev,
+ "No eeprom i2c system config!\n");
+
+	return !ret;
+}
+
+static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int ret;
+
+ if (control->sys_func && control->sys_func->eeprom_i2c_xfer) {
+ ret = control->sys_func->eeprom_i2c_xfer(ras_core,
+ eeprom_addr, eeprom_buf, buf_size, read);
+
+ if ((ret > 0) && !read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+
+ return ret;
+ }
+
+ RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n");
+ return -EINVAL;
+}
+
+static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
+
+ if (read)
+ limit = ras_core->ras_eeprom.max_read_len;
+ else
+ limit = ras_core->ras_eeprom.max_write_len;
+
+ if (limit && (limit <= EEPROM_OFFSET_SIZE)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+ eeprom_addr, buf_size,
+ read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ return -EINVAL;
+ }
+
+ ras_core_down_gpu_reset_lock(ras_core);
+
+ if (limit == 0) {
+ res = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, buf_size, read);
+ } else {
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
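+		/* For example, with max_write_len = 32 the usable chunk is
+		 * 30 bytes, so a 100-byte payload goes out as transfers of
+		 * 30 + 30 + 30 + 10 bytes.
+		 */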
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = (buf_size < limit) ? buf_size : limit;
+
+ r = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ break;
+
+ res += r;
+ }
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return res;
+}
+
+static int __eeprom_read(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, true);
+}
+
+static int __eeprom_write(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, false);
+}
+
+static void
+__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void
+__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __write_table_header(struct ras_eeprom_control *control)
+{
+ u8 buf[RAS_TABLE_HEADER_SIZE];
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int res;
+
+ memset(buf, 0, sizeof(buf));
+ __encode_table_header_to_buf(&control->tbl_hdr, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table header:%d\n", res);
+ } else if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static void
+__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table ras info:%d\n", res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
+static u8 __calc_hdr_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ /* Header checksum, skip checksum field in the calculation */
+ sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+ pp = (u8 *) &control->tbl_hdr;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static int ras_eeprom_correct_header_tag(
+ struct ras_eeprom_control *control,
+ uint32_t header)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ u8 *hh;
+ int res;
+ u8 csum;
+
+ csum = -hdr->checksum;
+
+ hh = (void *) &hdr->header;
+ csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+ hh = (void *) &header;
+ csum += hh[0] + hh[1] + hh[2] + hh[3];
+ csum = -csum;
+ mutex_lock(&control->ras_tbl_mutex);
+ hdr->header = header;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void ras_set_eeprom_table_version(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ hdr->version = RAS_TABLE_VER_V3;
+}
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ u8 csum;
+ int res;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ hdr->header = RAS_TABLE_HDR_VAL;
+ ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = RAS_GPU_HEALTH_USABLE;
+ /**
+ * GPU health represented as a percentage.
+		 * 0 means worst health, 100 means fully healthy.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = control->record_threshold_count;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+
+ control->ras_num_recs = 0;
+ control->ras_fri = 0;
+
+ control->bad_channel_bitmap = 0;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+ control->update_channel_flag = false;
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+ buf[i++] = record->err_type;
+
+ buf[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buf + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+ i += 6;
+
+ buf[i++] = record->mem_channel;
+ buf[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+}
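+
+/* __encode_table_record_to_buf() writes 1 + 1 + 8 + 6 + 1 + 1 + 6 = 24
+ * bytes per record, matching RAS_TABLE_RECORD_SIZE.
+ */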
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+ record->err_type = buf[i++];
+
+ record->bank = buf[i++];
+
+ memcpy(&tmp, buf + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buf + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buf[i++];
+ record->mcumc_id = buf[i++];
+
+ memcpy(&tmp, buf + i, 6);
+ record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ bool ret = false;
+ int bad_page_count;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return false;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+ if (bad_page_count > control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+ ret = false;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consider adjusting the customized threshold.\n");
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * __ras_eeprom_write -- write indexed from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_write(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_write(ras_core,
+ control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Writing %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short write, return error.*/
+ RAS_DEV_ERR(ras_core->dev,
+ "Wrote %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static int ras_eeprom_append_table(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ const u32 num)
+{
+ u32 a, b, i;
+ u8 *buf, *pp;
+ int res;
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Encode all of them in one go.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __encode_table_record_to_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+
+ /* a, first record index to write into.
+ * b, last record index to write into.
+ * a = first index to read (fri) + number of records in the table,
+ * b = a + @num - 1.
+ * Let N = control->ras_max_num_record_count, then we have,
+ * case 0: 0 <= a <= b < N,
+ * just append @num records starting at a;
+ * case 1: 0 <= a < N <= b,
+ * append (N - a) records starting at a, and
+ * append the remainder, b % N + 1, starting at 0.
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+ * case 2a: 0 <= a <= b < N
+ * append num records starting at a; and fix fri if b overwrote it,
+ * and since a <= b, if b overwrote it then a must've also,
+ * and if b didn't overwrite it, then a didn't also.
+ * case 2b: 0 <= b < a < N
+ * write num records starting at a, which wraps around 0=N
+ * and overwrite fri unconditionally. Now from case 2a,
+ * this means that b eclipsed fri to overwrite it and wrap
+ * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally
+ * set fri = b + 1 (mod N).
+ * Now, since fri is updated in every case, except the trivial case 0,
+ * the number of records present in the table after writing, is,
+ * num_recs - 1 = b - fri (mod N), and we take the positive value,
+ * by adding an arbitrary multiple of N before taking the modulo N
+ * as shown below.
+ */
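+	/* Worked example: with ras_max_record_count = 8, ras_fri = 6,
+	 * ras_num_recs = 3 and num = 4, we get a = 9 and b = 12; this is
+	 * case 2a with a = 1 and b = 4 after the modulo, so the records
+	 * land at indices 1..4, fri stays at 6 (since b < fri), and
+	 * ras_num_recs becomes 1 + (8 + 4 - 6) % 8 = 7.
+	 */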
+ a = control->ras_fri + control->ras_num_recs;
+ b = a + num - 1;
+ if (b < control->ras_max_record_count) {
+ res = __ras_eeprom_write(control, buf, a, num);
+ } else if (a < control->ras_max_record_count) {
+ u32 g0, g1;
+
+ g0 = control->ras_max_record_count - a;
+ g1 = b % control->ras_max_record_count + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ if (g1 > control->ras_fri)
+ control->ras_fri = g1 % control->ras_max_record_count;
+ } else {
+ a %= control->ras_max_record_count;
+ b %= control->ras_max_record_count;
+
+ if (a <= b) {
+ /* Note that, b - a + 1 = num. */
+ res = __ras_eeprom_write(control, buf, a, num);
+ if (res)
+ goto Out;
+ if (b >= control->ras_fri)
+ control->ras_fri = (b + 1) % control->ras_max_record_count;
+ } else {
+ u32 g0, g1;
+
+ /* b < a, which means, we write from
+ * a to the end of the table, and from
+ * the start of the table to b.
+ */
+ g0 = control->ras_max_record_count - a;
+ g1 = b + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ control->ras_fri = g1 % control->ras_max_record_count;
+ }
+ }
+ control->ras_num_recs = 1 +
+ (control->ras_max_record_count + b - control->ras_fri)
+ % control->ras_max_record_count;
+Out:
+ kfree(buf);
+ return res;
+}
+
+static int ras_eeprom_update_header(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int threshold_config = control->record_threshold_config;
+ u8 *buf, *pp, csum;
+ u32 buf_size;
+ int bad_page_count;
+ int res;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+	/* Modify the header if the bad page count exceeds the threshold.
+ */
+ if (threshold_config != 0 &&
+ bad_page_count > control->record_threshold_count) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ bad_page_count, control->record_threshold_count);
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
+
+ if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) &&
+ (threshold_config != NONSTOP_OVER_THRESHOLD))
+ ras_core->is_rma = true;
+
+ /* ignore the -ENOTSUPP return value */
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL);
+ }
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ control->tbl_hdr.checksum = 0;
+
+ buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
+ res = -ENOMEM;
+ goto Out;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_record_offset,
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM failed reading records:%d\n", res);
+ goto Out;
+ } else if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM read %d out of %d bytes\n", res, buf_size);
+ res = -EIO;
+ goto Out;
+ }
+
+ /**
+ * bad page records have been stored in eeprom,
+ * now calculate gpu health percent
+ */
+ if (threshold_config != 0 &&
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ bad_page_count <= control->record_threshold_count)
+ control->tbl_rai.health_percent = ((control->record_threshold_count -
+ bad_page_count) * 100) / control->record_threshold_count;
+
+ /* Recalc the checksum.
+ */
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+
+ csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ /* avoid sign extension when assigning to "checksum" */
+ csum = -csum;
+ control->tbl_hdr.checksum = csum;
+ res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+Out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * ras_eeprom_append -- append records to the EEPROM RAS table
+	uint8_t revision_minor; /* CPER_SEC__MINOR_REV_1 */
+	uint8_t revision_major; /* CPER_SEC__MAJOR_REV_22 */
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, calculate the checksum and write
+ * the table back to EEPROM. @num must be at least 1, and the sum of
+ * @num and the records already stored must not exceed
+ * control->ras_max_record_count.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int res;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n");
+ return -EINVAL;
+ } else if ((num + control->ras_num_recs) > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+			"cannot append %d records; table capacity is %d\n",
+ num, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = ras_eeprom_append_table(control, record, num);
+ if (!res)
+ res = ras_eeprom_update_header(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+/**
+ * __ras_eeprom_read -- read indexed from EEPROM into buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index, start reading at this index, absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_read(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Reading %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short read, return error.
+ */
+ RAS_DEV_ERR(ras_core->dev,
+ "Read %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int i, res;
+ u8 *buf, *pp;
+ u32 g0, g1;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_num_recs) {
+ RAS_DEV_ERR(ras_core->dev,
+ "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
+ return -EINVAL;
+ }
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Determine how many records to read, from the first record
+ * index, fri, to the end of the table, and from the beginning
+ * of the table, such that the total number of records is
+ * @num, and we handle wrap around when fri > 0 and
+ * fri + num > RAS_MAX_RECORD_COUNT.
+ *
+ * First we compute the index of the last element
+ * which would be fetched from each region,
+ * g0 is in [fri, fri + num - 1], and
+ * g1 is in [0, RAS_MAX_RECORD_COUNT - 1].
+ * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of
+ * the last element to fetch, we set g0 to _the number_
+ * of elements to fetch, @num, since we know that the last
+	 * index to be fetched does not exceed the table.
+ *
+ * If, however, g0 >= RAS_MAX_RECORD_COUNT, then
+ * we set g0 to the number of elements to read
+ * until the end of the table, and g1 to the number of
+ * elements to read from the beginning of the table.
+ */
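+	/* Worked example: with ras_max_record_count = 8, ras_fri = 6 and
+	 * num = 4, g0 starts as 9 and g1 as 1; since g0 >= 8 we end up
+	 * reading g0 = 2 records starting at index 6 and g1 = 2 records
+	 * starting at index 0.
+	 */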
+ g0 = control->ras_fri + num - 1;
+ g1 = g0 % control->ras_max_record_count;
+ if (g0 < control->ras_max_record_count) {
+ g0 = num;
+ g1 = 0;
+ } else {
+ g0 = control->ras_max_record_count - control->ras_fri;
+ g1 += 1;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = __ras_eeprom_read(control, buf, control->ras_fri, g0);
+ if (res)
+ goto Out;
+ if (g1) {
+ res = __ras_eeprom_read(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ }
+
+ res = 0;
+
+	/* All records have been read; now decode them into @record.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+Out:
+ kfree(buf);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+
+ /* get available eeprom table version first before eeprom table init */
+ ras_set_eeprom_table_version(control);
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
+}
+
+/**
+ * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+ * @control: pointer to control structure
+ *
+ * Check the checksum of the stored in EEPROM RAS table.
+ *
+ * Return 0 if the checksum is correct,
+ * positive if it is not correct, and
+ * -errno on I/O error.
+ */
+static int __verify_ras_table_checksum(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int buf_size, res;
+ u8 csum, *buf, *pp;
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Out of memory checking RAS table checksum.\n");
+ return -ENOMEM;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, buf_size);
+ if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Partial read for checksum, res:%d\n", res);
+ /* On partial reads, return -EIO.
+ */
+ if (res >= 0)
+ res = -EIO;
+ goto Out;
+ }
+
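+	/* The stored checksum byte is chosen so that all table bytes sum
+	 * to zero modulo 256, so any non-zero csum below indicates a
+	 * corrupted table.
+	 */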
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+Out:
+ kfree(buf);
+ return res < 0 ? res : csum;
+}
+
+static int __read_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /**
+ * EEPROM table V2_1 supports ras info,
+ * read EEPROM table ras info
+ */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table ras info, res:%d\n", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
+static int __check_ras_table_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+ struct ras_eeprom_table_header *hdr;
+ int res;
+
+ hdr = &control->tbl_hdr;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ control->ras_header_offset = RAS_HDR_START;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
+ mutex_init(&control->ras_tbl_mutex);
+
+ /* Read the table header from EEPROM address */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table header, res:%d\n", res);
+ return res >= 0 ? -EIO : res;
+ }
+
+ __decode_table_header_from_buf(hdr, buf);
+
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table");
+ return ras_eeprom_reset_table(ras_core);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ break;
+ case RAS_TABLE_VER_V1:
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+			"RAS header invalid, records in header: %u, max allowed: %u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+
+ return 0;
+}
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr;
+ int bad_page_count;
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ hdr = &control->tbl_hdr;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (hdr->header == RAS_TABLE_HDR_VAL) {
+ RAS_DEV_INFO(ras_core->dev,
+ "Found existing EEPROM table with %d records\n",
+ bad_page_count);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS table incorrect checksum or error:%d\n", res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * bad_page_count >= 9 * control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS records:%u exceeds 90%% of threshold:%d\n",
+ bad_page_count,
+ control->record_threshold_count);
+
+ } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+ control->record_threshold_config != 0) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS Table incorrect checksum or error:%d\n", res);
+
+ if (control->record_threshold_count >= bad_page_count) {
+			/* This means that the threshold was increased since
+			 * the last time the system was booted, and now
+			 * control->record_threshold_count - bad_page_count >= 0,
+ * so that at least one more record can be saved,
+ * before the page count threshold is reached.
+ */
+ RAS_DEV_INFO(ras_core->dev,
+ "records:%d threshold:%d, resetting RAS table header signature",
+ bad_page_count,
+ control->record_threshold_count);
+ res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ res = 0;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_ERR(ras_core->dev,
+				"User defined threshold is set, runtime service will be halted when threshold is reached\n");
+ }
+ }
+ }
+
+ return res < 0 ? res : 0;
+}
+
+int ras_eeprom_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+ struct ras_eeprom_config *eeprom_cfg;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ ras_core->is_rma = false;
+
+ control = &ras_core->ras_eeprom;
+
+ memset(control, 0, sizeof(*control));
+
+ eeprom_cfg = &ras_core->config->eeprom_cfg;
+ control->record_threshold_config =
+ eeprom_cfg->eeprom_record_threshold_config;
+
+ control->record_threshold_count = ras_eeprom_max_record_count(ras_core);
+ if (eeprom_cfg->eeprom_record_threshold_count <
+ control->record_threshold_count)
+ control->record_threshold_count =
+ eeprom_cfg->eeprom_record_threshold_count;
+
+ control->sys_func = eeprom_cfg->eeprom_sys_fn;
+ control->max_read_len = eeprom_cfg->max_i2c_read_len;
+ control->max_write_len = eeprom_cfg->max_i2c_write_len;
+ control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter;
+ control->i2c_port = eeprom_cfg->eeprom_i2c_port;
+ control->i2c_address = eeprom_cfg->eeprom_i2c_addr;
+
+ control->update_channel_flag = false;
+
+ return __check_ras_table_status(ras_core);
+}
+
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ control = &ras_core->ras_eeprom;
+ mutex_destroy(&control->ras_tbl_mutex);
+
+ return 0;
+}
+
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return 0;
+
+ return ras_core->ras_eeprom.ras_num_recs;
+}
+
+void ras_eeprom_sync_info(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return;
+
+ control = &ras_core->ras_eeprom;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+}
+
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return RAS_GPU_HEALTH_NONE;
+
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD)
+ return RAS_GPU_IN_BAD_STATUS;
+
+ return rai->rma_status;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
new file mode 100644
index 000000000000..2abe566c18b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_EEPROM_H__
+#define __RAS_EEPROM_H__
+#include "ras_sys.h"
+
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
+
+#define NONSTOP_OVER_THRESHOLD -2
+#define WARN_NONSTOP_OVER_THRESHOLD -1
+#define DISABLE_RETIRE_PAGE 0
+
+/*
+ * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0],
+ * nps mode: eeprom_umc_record.retired_row_pfn[47:40]
+ */
+#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL
+#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL
+#define EEPROM_RECORD_UMC_NPS_SHIFT 40
+
+#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \
+ (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \
+ EEPROM_RECORD_UMC_NPS_SHIFT)
+
+#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \
+ ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK)
+
+#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \
+do { \
+ uint64_t tmp = (NPS); \
+ tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \
+ tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \
+ (RECORD)->retired_row_pfn = tmp; \
+} while (0)
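+
+/*
+ * For example, with ADDR = 0x12345 and NPS = 4 the macro stores
+ * 0x040000012345 in retired_row_pfn; EEPROM_RECORD_UMC_ADDR_PFN() then
+ * yields 0x12345 and EEPROM_RECORD_UMC_NPS_MODE() yields 4.
+ */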
+
+enum ras_gpu_health_status {
+ RAS_GPU_HEALTH_NONE = 0,
+ RAS_GPU_HEALTH_USABLE = 1,
+ RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+ RAS_GPU_IN_BAD_STATUS = 3,
+};
+
+enum ras_eeprom_err_type {
+ RAS_EEPROM_ERR_NA,
+ RAS_EEPROM_ERR_RECOVERABLE,
+ RAS_EEPROM_ERR_NON_RECOVERABLE,
+ RAS_EEPROM_ERR_COUNT,
+};
+
+struct ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+} __packed;
+
+struct ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
+struct ras_eeprom_control {
+ struct ras_eeprom_table_header tbl_hdr;
+ struct ras_eeprom_table_ras_info tbl_rai;
+
+ /* record threshold */
+ int record_threshold_config;
+ uint32_t record_threshold_count;
+ bool update_channel_flag;
+
+ const struct ras_eeprom_sys_func *sys_func;
+ void *i2c_adapter;
+ u32 i2c_port;
+ u16 max_read_len;
+ u16 max_write_len;
+
+	/* Base I2C EEPROM 19-bit memory address,
+	 * where the table is located. For more information,
+	 * see the comment at the top of ras_eeprom.c.
+ */
+ u32 i2c_address;
+
+ /* The byte offset off of @i2c_address
+ * where the table header is found,
+ * and where the records start--always
+ * right after the header.
+ */
+ u32 ras_header_offset;
+ u32 ras_info_offset;
+ u32 ras_record_offset;
+
+ /* Number of records in the table.
+ */
+ u32 ras_num_recs;
+
+ /* First record index to read, 0-based.
+ * Range is [0, num_recs-1]. This is
+ * an absolute index, starting right after
+ * the table header.
+ */
+ u32 ras_fri;
+
+ /* Maximum possible number of records
+ * we could store, i.e. the maximum capacity
+ * of the table.
+ */
+ u32 ras_max_record_count;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
+	/* Bitmap recording which memory channels have had bad pages
+ */
+ u32 bad_channel_bitmap;
+};
+
+/*
+ * Represents single table record. Packed to be easily serialized into byte
+ * stream.
+ */
+struct eeprom_umc_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_row_pfn;
+ uint64_t ts;
+
+ enum ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ /* The following variables will not be saved to eeprom.
+ */
+ uint64_t cur_nps_retired_row_pfn;
+ uint32_t cur_nps_bank;
+ uint32_t cur_nps;
+};
+
+struct ras_core_context;
+int ras_eeprom_hw_init(struct ras_core_context *ras_core);
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core);
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core);
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core);
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core);
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core);
+void ras_eeprom_sync_info(struct ras_core_context *ras_core);
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core);
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
new file mode 100644
index 000000000000..f5ce28777705
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_gfx.h"
+#include "ras_core_status.h"
+
+static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ return &gfx_ras_func_v9_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "GFX ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ return gfx->ip_func->get_ta_subblock(ras_core,
+ error_type, subblock, ta_subblock);
+}
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ gfx->gfx_ip_version = ras_core->config->gfx_ip_version;
+
+ gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version);
+
+ return gfx->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_gfx_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
new file mode 100644
index 000000000000..8a42d69fb0ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_H__
+#define __RAS_GFX_H__
+
+struct ras_gfx_ip_func {
+ int (*get_ta_subblock)(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+};
+
+struct ras_gfx {
+ uint32_t gfx_ip_version;
+ const struct ras_gfx_ip_func *ip_func;
+};
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core);
+int ras_gfx_hw_fini(struct ras_core_context *ras_core);
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
new file mode 100644
index 000000000000..6213d3f125be
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_core_status.h"
+
+enum ta_gfx_v9_subblock {
+ /*CPC*/
+ TA_GFX_V9__GFX_CPC_INDEX_START = 0,
+ TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START,
+ TA_GFX_V9__GFX_CPC_UCODE,
+ TA_GFX_V9__GFX_DC_STATE_ME1,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME1,
+ TA_GFX_V9__GFX_DC_RESTORE_ME1,
+ TA_GFX_V9__GFX_DC_STATE_ME2,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME2,
+ TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF*/
+ TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME1,
+ TA_GFX_V9__GFX_CPF_TAG,
+ TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG,
+ /* CPG*/
+ TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_TAG,
+ TA_GFX_V9__GFX_CPG_TAG,
+ TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG,
+ /* GDS*/
+ TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI*/
+ TA_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ*/
+ TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_LDS_D,
+ TA_GFX_V9__GFX_SQ_LDS_I,
+ TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/
+ TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges)*/
+ TA_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0*/
+ TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START,
+ TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ TA_GFX_V9__GFX_SQC_INDEX0_START,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_INDEX0_END =
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1*/
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX1_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2*/
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX2_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA*/
+ TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_AFIFO,
+ TA_GFX_V9__GFX_TA_FL_LFIFO,
+ TA_GFX_V9__GFX_TA_FX_LFIFO,
+ TA_GFX_V9__GFX_TA_FS_CFIFO,
+ TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA*/
+ TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges)*/
+ TA_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0*/
+ TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1*/
+ TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ TA_GFX_V9__GFX_TCC_INDEX1_END =
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2*/
+ TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ TA_GFX_V9__GFX_TCC_WRITE_RETURN,
+ TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ TA_GFX_V9__GFX_TCC_INDEX2_END =
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3*/
+ TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_INDEX3_END =
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4*/
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX4_END =
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI*/
+ TA_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP*/
+ TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_LFIFO_RAM,
+ TA_GFX_V9__GFX_TCP_CMD_FIFO,
+ TA_GFX_V9__GFX_TCP_VM_FIFO,
+ TA_GFX_V9__GFX_TCP_DB_RAM,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ TA_GFX_V9__GFX_TCP_INDEX_END = TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD*/
+ TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_HI,
+ TA_GFX_V9__GFX_TD_CS_FIFO,
+ TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges)*/
+ TA_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0*/
+ TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_RRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_WRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1*/
+ TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_IORD_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2*/
+ TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D1MEM,
+ TA_GFX_V9__GFX_EA_MAM_D2MEM,
+ TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank*/
+ TA_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker*/
+ TA_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ TA_GFX_V9__GFX_MAX
+};
+
+struct ras_gfx_subblock_t {
+ const char *name;
+ int ta_subblock;
+ int hw_supported_error_type;
+ int sw_supported_error_type;
+};
+
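+/* Table entry helper: flags a..d are packed into hw_supported_error_type
+ * (bits 0..3) and e..h into sw_supported_error_type (g -> bit 0, e -> bit 1,
+ * h -> bit 2, f -> bit 3). get_ta_subblock() tests these masks against the
+ * caller's error_type bitmask.
+ */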
+#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
+ [RAS_GFX_V9__##subblock] = { \
+ #subblock, \
+ TA_GFX_V9__##subblock, \
+ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
+ (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
+ }
+
+const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = {
+ RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
+
+static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ const struct ras_gfx_subblock_t *gfx_subblock;
+
+ if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks))
+ return -EINVAL;
+
+ gfx_subblock = &ras_gfx_v9_0_subblocks[subblock];
+ if (!gfx_subblock->name)
+ return -EPERM;
+
+ if (!(gfx_subblock->hw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware does not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ if (!(gfx_subblock->sw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver does not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ *ta_subblock = gfx_subblock->ta_subblock;
+
+ return 0;
+}
+
+const struct ras_gfx_ip_func gfx_ras_func_v9_0 = {
+ .get_ta_subblock = gfx_v9_0_get_ta_subblock,
+};
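
A minimal caller sketch (illustrative only, not part of this patch; ras_core and
error_type are assumed to be in scope) of the lookup exposed through the common
ras_gfx layer:

    uint32_t ta_subblock;
    int ret;

    /* Translate the driver's subblock index into the RAS TA numbering. */
    ret = ras_gfx_get_ta_subblock(ras_core, error_type,
                                  RAS_GFX_V9__GFX_SQ_SGPR, &ta_subblock);
    if (ret)
        return ret; /* unknown subblock or unsupported error type */
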
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
new file mode 100644
index 000000000000..659b56619747
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_V9_0_H__
+#define __RAS_GFX_V9_0_H__
+
+enum ras_gfx_v9_subblock {
+ /* CPC */
+ RAS_GFX_V9__GFX_CPC_INDEX_START = 0,
+ RAS_GFX_V9__GFX_CPC_SCRATCH =
+ RAS_GFX_V9__GFX_CPC_INDEX_START,
+ RAS_GFX_V9__GFX_CPC_UCODE,
+ RAS_GFX_V9__GFX_DC_STATE_ME1,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME1,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME1,
+ RAS_GFX_V9__GFX_DC_STATE_ME2,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME2,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ RAS_GFX_V9__GFX_CPC_INDEX_END =
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME2 =
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME1,
+ RAS_GFX_V9__GFX_CPF_TAG,
+ RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG,
+ /* CPG */
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_ROQ =
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_TAG,
+ RAS_GFX_V9__GFX_CPG_TAG,
+ RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG,
+ /* GDS */
+ RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ RAS_GFX_V9__GFX_GDS_INDEX_END =
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ RAS_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ */
+ RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_LDS_D,
+ RAS_GFX_V9__GFX_SQ_LDS_I,
+ RAS_GFX_V9__GFX_SQ_VGPR,
+ RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ RAS_GFX_V9__GFX_SQC_INDEX0_START =
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ RAS_GFX_V9__GFX_SQC_INDEX0_START,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_INDEX0_END =
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX1_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX2_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX_END =
+ RAS_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA */
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_DFIFO =
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_AFIFO,
+ RAS_GFX_V9__GFX_TA_FL_LFIFO,
+ RAS_GFX_V9__GFX_TA_FX_LFIFO,
+ RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA */
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_HOLE_FIFO =
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ RAS_GFX_V9__GFX_TCA_INDEX_END =
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ RAS_GFX_V9__GFX_TCC_INDEX0_START =
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX0_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_INDEX0_END =
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_DEC =
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ RAS_GFX_V9__GFX_TCC_INDEX1_END =
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ RAS_GFX_V9__GFX_TCC_WRITE_RETURN,
+ RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ RAS_GFX_V9__GFX_TCC_INDEX2_END =
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO =
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_INDEX3_END =
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX4_END =
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX_END =
+ RAS_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI */
+ RAS_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_CACHE_RAM =
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_LFIFO_RAM,
+ RAS_GFX_V9__GFX_TCP_CMD_FIFO,
+ RAS_GFX_V9__GFX_TCP_VM_FIFO,
+ RAS_GFX_V9__GFX_TCP_DB_RAM,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ RAS_GFX_V9__GFX_TCP_INDEX_END =
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_LO =
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_HI,
+ RAS_GFX_V9__GFX_TD_CS_FIFO,
+ RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ RAS_GFX_V9__GFX_EA_INDEX0_START =
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM =
+ RAS_GFX_V9__GFX_EA_INDEX0_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_RRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_WRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_INDEX0_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM =
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_IORD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_INDEX1_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D0MEM =
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D1MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D2MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX2_END =
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX_END =
+ RAS_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ RAS_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ RAS_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ RAS_GFX_V9__GFX_MAX
+};
+
+extern const struct ras_gfx_ip_func gfx_ras_func_v9_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
new file mode 100644
index 000000000000..0a838fdcb2f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+
+#define RAS_LOG_MAX_QUERY_SIZE 0xC000
+#define RAS_LOG_MEM_TEMP_SIZE 0x200
+#define RAS_LOG_MEMPOOL_SIZE \
+ (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE)
+
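+/* Radix-tree key layout: batch id in the upper bits, per-batch record number
+ * (0..MAX_RECORD_PER_BATCH - 1) in the low 8 bits.
+ */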
+#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn))
+
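+/* Template ACA bank used to synthesize a record for RAS_LOG_EVENT_RMA; the
+ * socket id is patched into the IPID register when the event is logged.
+ */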
+static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = {
+ [ACA_REG_IDX__CTL] = 0x1,
+ [ACA_REG_IDX__STATUS] = 0xB000000000000137,
+ [ACA_REG_IDX__ADDR] = 0x0,
+ [ACA_REG_IDX__MISC0] = 0x0,
+ [ACA_REG_IDX__CONFG] = 0x1ff00000002,
+ [ACA_REG_IDX__IPID] = 0x9600000000,
+ [ACA_REG_IDX__SYND] = 0x0,
+};
+
+static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t count = 0;
+
+ if (log_ring->logged_ecc_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count should not be less than 0!\n");
+ count = 0;
+ } else {
+ count = log_ring->logged_ecc_count;
+ }
+
+ if (count > RAS_LOG_MEMPOOL_SIZE)
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count is out of range!\n");
+
+ return count;
+}
+
+static int ras_log_ring_add_data(struct ras_core_context *ras_core,
+ struct ras_log_info *log, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Invalid batch sub seqno:%d, batch:0x%llx\n",
+ batch_tag->sub_seqno, batch_tag->batch_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ if (batch_tag) {
+ log->seqno =
+ BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno);
+ batch_tag->sub_seqno++;
+ } else {
+ log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0);
+ log_ring->mono_upward_batch_id++;
+ }
+ ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log);
+ if (!ret)
+ log_ring->logged_ecc_count++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to add ras log! seqno:0x%llx, ret:%d\n",
+ log->seqno, ret);
+ mempool_free(log, log_ring->ras_log_mempool);
+ }
+
+ return ret;
+}
+
+static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ uint32_t i = 0, j = 0;
+ uint64_t batch_id, idx;
+ void *data;
+ int ret = -ENODATA;
+
+ if (count > ras_log_ring_get_logged_ecc_count(ras_core))
+ return -EINVAL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ i++;
+ }
+ }
+ batch_id = ++log_ring->last_del_batch_id;
+ if (i >= count) {
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return ret;
+}
+
+static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t batch_id, idx;
+ unsigned long flags = 0;
+ void *data;
+ int j;
+
+ if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) &&
+ !log_ring->logged_ecc_count)
+ return;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ }
+ }
+ batch_id++;
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+}
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ memset(log_ring, 0, sizeof(*log_ring));
+
+ log_ring->ras_log_mempool = mempool_create_kmalloc_pool(
+ RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info));
+ if (!log_ring->ras_log_mempool)
+ return -ENOMEM;
+
+ INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL);
+
+ spin_lock_init(&log_ring->spin_lock);
+
+ return 0;
+}
+
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ ras_log_ring_clear_log_tree(ras_core);
+ log_ring->logged_ecc_count = 0;
+ log_ring->last_del_batch_id = 0;
+ log_ring->mono_upward_batch_id = 0;
+
+ mempool_destroy(log_ring->ras_log_mempool);
+
+ return 0;
+}
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct ras_log_batch_tag *batch_tag;
+ unsigned long flags = 0;
+
+ batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL);
+ if (!batch_tag)
+ return NULL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_tag->batch_id = log_ring->mono_upward_batch_id;
+ log_ring->mono_upward_batch_id++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ batch_tag->sub_seqno = 0;
+ batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ return batch_tag;
+}
+
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *batch_tag)
+{
+ kfree(batch_tag);
+}
+
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct device_system_info dev_info = {0};
+ struct ras_log_info *log;
+ uint64_t socket_id;
+ void *obj;
+
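+ /* If the mempool is exhausted or the log has reached its capacity,
+ * evict the oldest batches and retry the allocation once.
+ */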
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ if (!obj ||
+ (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) {
+ ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE);
+ if (!obj)
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ }
+
+ if (!obj) {
+ RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n");
+ return;
+ }
+
+ log = (struct ras_log_info *)obj;
+
+ memset(log, 0, sizeof(*log));
+ log->timestamp =
+ batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core);
+ log->event = event;
+
+ if (data)
+ memcpy(&log->aca_reg, data, sizeof(log->aca_reg));
+
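+ /* For RMA events, log the template bank and fold the socket id into the
+ * IPID register: socket_id / 4 into bit 0, socket_id % 4 into bits 44-45.
+ */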
+ if (event == RAS_LOG_EVENT_RMA) {
+ memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg));
+ ras_core_get_device_system_info(ras_core, &dev_info);
+ socket_id = dev_info.socket_id;
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= ((socket_id / 4) & 0x01);
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= (((socket_id % 4) & 0x3) << 44);
+ }
+
+ ras_log_ring_add_data(ras_core, log, batch_tag);
+}
+
+static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core,
+ uint64_t idx)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ void *data;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ data = radix_tree_lookup(&log_ring->ras_log_root, idx);
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return (struct ras_log_info *)data;
+}
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id,
+ struct ras_log_info **log_arr, uint32_t arr_num)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t idx;
+ uint32_t i, count = 0;
+ void *data;
+
+ if ((batch_id >= log_ring->mono_upward_batch_id) ||
+ (batch_id < log_ring->last_del_batch_id))
+ return -EINVAL;
+
+ for (i = 0; i < MAX_RECORD_PER_BATCH; i++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, i);
+ data = ras_log_ring_lookup_data(ras_core, idx);
+ if (data) {
+ log_arr[count++] = data;
+ if (count >= arr_num)
+ break;
+ }
+ }
+
+ return count;
+}
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ overview->logged_batch_count =
+ log_ring->mono_upward_batch_id - log_ring->last_del_batch_id;
+ overview->last_batch_id = log_ring->mono_upward_batch_id;
+ overview->first_batch_id = log_ring->last_del_batch_id;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
new file mode 100644
index 000000000000..0ff6cc35678d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_LOG_RING_H__
+#define __RAS_LOG_RING_H__
+#include "ras_aca.h"
+
+#define MAX_RECORD_PER_BATCH 32
+
+#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8)
+
+enum ras_log_event {
+ RAS_LOG_EVENT_NONE,
+ RAS_LOG_EVENT_UE,
+ RAS_LOG_EVENT_DE,
+ RAS_LOG_EVENT_CE,
+ RAS_LOG_EVENT_POISON_CREATION,
+ RAS_LOG_EVENT_POISON_CONSUMPTION,
+ RAS_LOG_EVENT_RMA,
+ RAS_LOG_EVENT_COUNT_MAX,
+};
+
+struct ras_aca_reg {
+ uint64_t regs[ACA_REG_MAX_COUNT];
+};
+
+struct ras_log_info {
+ uint64_t seqno;
+ uint64_t timestamp;
+ enum ras_log_event event;
+ union {
+ struct ras_aca_reg aca_reg;
+ };
+};
+
+struct ras_log_batch_tag {
+ uint64_t batch_id;
+ uint64_t timestamp;
+ uint32_t sub_seqno;
+};
+
+struct ras_log_ring {
+ void *ras_log_mempool;
+ struct radix_tree_root ras_log_root;
+ spinlock_t spin_lock;
+ uint64_t mono_upward_batch_id;
+ uint64_t last_del_batch_id;
+ int logged_ecc_count;
+};
+
+struct ras_log_batch_overview {
+ uint64_t first_batch_id;
+ uint64_t last_batch_id;
+ uint32_t logged_batch_count;
+};
+
+struct ras_core_context;
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core);
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core);
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core);
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *tag);
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *tag);
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx,
+ struct ras_log_info **log_arr, uint32_t arr_num);
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview);
+#endif
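
A rough usage sketch (illustrative only, not part of this patch; aca_bank stands
in for whatever ACA register dump the caller already holds) of grouping several
records under one batch tag:

    struct ras_log_batch_tag *tag;

    tag = ras_log_ring_create_batch_tag(ras_core);
    if (tag) {
        /* Records added with the same tag share its batch id and timestamp. */
        ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, &aca_bank, tag);
        ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_POISON_CREATION, NULL, tag);
        ras_log_ring_destroy_batch_tag(ras_core, tag);
    }
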
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
new file mode 100644
index 000000000000..f3321df85021
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_mp1_v13_0.h"
+
+static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &mp1_ras_func_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "MP1 IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->get_valid_bank_count(ras_core, type, count);
+}
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val);
+}
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ mp1->mp1_ip_version = ras_core->config->mp1_ip_version;
+ mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn;
+ if (!mp1->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version);
+
+ return mp1->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_mp1_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
new file mode 100644
index 000000000000..de1d08286f41
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_H__
+#define __RAS_MP1_H__
+#include "ras.h"
+
+enum ras_err_type;
+struct ras_mp1_ip_func {
+ int (*get_valid_bank_count)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+ int (*dump_valid_bank)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_mp1 {
+ uint32_t mp1_ip_version;
+ const struct ras_mp1_ip_func *ip_func;
+ const struct ras_mp1_sys_func *sys_func;
+};
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core);
+int ras_mp1_hw_fini(struct ras_core_context *ras_core);
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 ecc_type, u32 idx, u32 reg_idx, u64 *val);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
new file mode 100644
index 000000000000..310d39fc816b
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_core_status.h"
+#include "ras_mp1_v13_0.h"
+
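+/* MP1 (SMU) message IDs used to query the number of valid ACA/MCA banks and
+ * to dump them one register DWORD at a time; the *Ce* variants serve both
+ * correctable and deferred errors.
+ */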
+#define RAS_MP1_MSG_QueryValidMcaCount 0x36
+#define RAS_MP1_MSG_McaBankDumpDW 0x37
+#define RAS_MP1_MSG_ClearMcaOnRead 0x39
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+#define MAX_UE_BANKS_PER_QUERY 12
+#define MAX_CE_BANKS_PER_QUERY 12
+
+static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ uint32_t bank_count = 0;
+ u32 msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!sys_func || !sys_func->mp1_get_valid_bank_count)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_QueryValidMcaCount;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count);
+ if (!ret) {
+ if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) ||
+ ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY)))
+ return -EINVAL;
+
+ *count = bank_count;
+ }
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ u32 msg;
+
+ if (!sys_func || !sys_func->mp1_dump_valid_bank)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_McaBankDumpDW;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val);
+}
+
+const struct ras_mp1_ip_func mp1_ras_func_v13_0 = {
+ .get_valid_bank_count = mp1_v13_0_get_bank_count,
+ .dump_valid_bank = mp1_v13_0_dump_bank,
+};
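
A rough sketch (illustrative only, not part of this patch; iterating reg_idx over
ACA_REG_MAX_COUNT entries is an assumption about the dump granularity) of a
caller draining the uncorrectable banks:

    u32 count = 0, bank, reg;
    u64 regs[ACA_REG_MAX_COUNT];

    if (!ras_mp1_get_bank_count(ras_core, RAS_ERR_TYPE__UE, &count)) {
        for (bank = 0; bank < count; bank++)
            for (reg = 0; reg < ACA_REG_MAX_COUNT; reg++)
                ras_mp1_dump_bank(ras_core, RAS_ERR_TYPE__UE,
                                  bank, reg, &regs[reg]);
    }
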
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
new file mode 100644
index 000000000000..2edfdb5f6a75
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_V13_0_H__
+#define __RAS_MP1_V13_0_H__
+#include "ras_mp1.h"
+
+extern const struct ras_mp1_ip_func mp1_ras_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
new file mode 100644
index 000000000000..bfddd104d548
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio.h"
+#include "ras_nbio_v7_9.h"
+
+static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ return &ras_nbio_v7_9;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "NBIO IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ nbio->nbio_ip_version = ras_core->config->nbio_ip_version;
+ nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn;
+ if (!nbio->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version);
+ if (!nbio->ip_func)
+ return -EINVAL;
+
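+ /* Enable the RAS controller and err_event_athub interrupt sources. */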
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, true);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true);
+ }
+
+ return 0;
+}
+
+int ras_nbio_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, false);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false);
+ }
+
+ return 0;
+}
+
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->ip_func) {
+ if (nbio->ip_func->handle_ras_controller_intr_no_bifring)
+ nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core);
+ if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring)
+ nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core);
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
new file mode 100644
index 000000000000..0a1313e59a02
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_H__
+#define __RAS_NBIO_H__
+#include "ras.h"
+
+struct ras_core_context;
+
+struct ras_nbio_ip_func {
+ int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core);
+ int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core);
+ uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio {
+ uint32_t nbio_ip_version;
+ const struct ras_nbio_ip_func *ip_func;
+ const struct ras_nbio_sys_func *sys_func;
+};
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core);
+int ras_nbio_hw_fini(struct ras_core_context *ras_core);
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
new file mode 100644
index 000000000000..f17d708ec668
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio_v7_9.h"
+
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+
+/* BIF_BX0_BIF_INTR_CNTL */
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2
+/* BIF_BX_PF0_PARTITION_MEM_STATUS */
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+
+static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* TODO: handle ras controller interrupt */
+ }
+
+ return 0;
+}
+
+static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+ int ret = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ ret = ras_core_handle_fatal_error(ras_core);
+ }
+
+ return ret;
+}
+
+static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core)
+{
+ uint32_t mem_status;
+ uint32_t mem_mode;
+
+ mem_status =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+
+ /* Each bit represents an NPS mode 1-8; ffs() converts the one-hot field to the mode number */
+ mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
+ return ffs(mem_mode);
+}
+
+const struct ras_nbio_ip_func ras_nbio_v7_9 = {
+ .handle_ras_controller_intr_no_bifring =
+ nbio_v7_9_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+ .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
new file mode 100644
index 000000000000..8711c82a927f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_V7_9_H__
+#define __RAS_NBIO_V7_9_H__
+#include "ras_nbio.h"
+
+extern const struct ras_nbio_ip_func ras_nbio_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
new file mode 100644
index 000000000000..3267dcdb169c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_process.h"
+
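+/* The event fifo holds up to 128 queued requests; kfifo_alloc() rounds
+ * the buffer size up to a power of two.
+ */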
+#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req))
+
+#define RAS_POLLING_ECC_TIMEOUT 300
+
+static int ras_process_put_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_in_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+ if (!ret) {
+ RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core,
+ uint32_t reset_cause)
+{
+ struct ras_event_req req = {0};
+
+ req.reset = reset_cause;
+
+ return ras_process_put_event(ras_core, &req);
+}
+
+static int ras_process_get_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ return kfifo_out_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+}
+
+static void ras_process_clear_event_fifo(struct ras_core_context *ras_core)
+{
+ struct ras_event_req req;
+ int ret;
+
+ do {
+ ret = ras_process_get_event(ras_core, &req);
+ } while (ret);
+}
+
+#define AMDGPU_RAS_WAITING_DATA_READY 200
+static int ras_process_umc_event(struct ras_core_context *ras_core,
+ uint32_t event_count)
+{
+ struct ras_ecc_count ecc_data;
+ int ret = 0;
+ uint32_t timeout = 0;
+ uint32_t detected_de_count = 0;
+
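+ /* Poll until the newly detected deferred-error count matches the
+ * interrupt count, giving the ECC data a grace period to become ready.
+ */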
+ do {
+ memset(&ecc_data, 0, sizeof(ecc_data));
+ ret = ras_core_update_ecc_info(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data);
+ if (ret)
+ return ret;
+
+ if (ecc_data.new_de_count) {
+ detected_de_count += ecc_data.new_de_count;
+ timeout = 0;
+ } else {
+ if (!timeout && event_count)
+ timeout = AMDGPU_RAS_WAITING_DATA_READY;
+
+ if (timeout) {
+ if (!--timeout)
+ break;
+
+ msleep(1);
+ }
+ }
+ } while (detected_de_count < event_count);
+
+ if (detected_de_count && ras_core_gpu_is_rma(ras_core))
+ ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA);
+
+ return 0;
+}
+
+static int ras_process_non_umc_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ struct ras_event_req req;
+ uint32_t event_count = kfifo_len(&ras_proc->event_fifo) / sizeof(req);
+ uint32_t reset_flags = 0;
+ int ret = 0, i;
+
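+ /* Drain the queued poison consumption events, notify listeners and
+ * aggregate the requested reset causes.
+ */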
+ for (i = 0; i < event_count; i++) {
+ memset(&req, 0, sizeof(req));
+ ret = ras_process_get_event(ras_core, &req);
+ if (!ret)
+ continue;
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__POISON_CONSUMPTION, &req);
+
+ reset_flags |= req.reset;
+
+ if (req.reset == GPU_RESET_CAUSE_RMA)
+ continue;
+
+ if (req.reset)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} GPU reset for %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ else
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ }
+
+ if (reset_flags) {
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESET_GPU, &reset_flags);
+ if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA))
+ return -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+int ras_process_handle_ras_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ uint32_t umc_event_count;
+ int ret;
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, NULL);
+ if (ret)
+ return ret;
+
+ ras_aca_clear_fatal_flag(ras_core);
+ ras_umc_log_pending_bad_bank(ras_core);
+
+ do {
+ umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+ ret = ras_process_umc_event(ras_core, umc_event_count);
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+ break;
+
+ if (umc_event_count)
+ atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+ } while (atomic_read(&ras_proc->umc_interrupt_count));
+
+ if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+ (kfifo_len(&ras_proc->event_fifo)))
+ ret = ras_process_non_umc_event(ras_core);
+
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+ /* Clear poison fifo */
+ ras_process_clear_event_fifo(ras_core);
+ atomic_set(&ras_proc->umc_interrupt_count, 0);
+ }
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END, NULL);
+ return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+ struct ras_process *ras_proc = (struct ras_process *)param;
+
+ return (kthread_should_stop() ||
+ atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+static int ras_process_thread(void *context)
+{
+ struct ras_core_context *ras_core = (struct ras_core_context *)context;
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
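+ /* Wake up when an interrupt request arrives or after the polling
+ * timeout expires, then process any pending RAS events.
+ */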
+ while (!kthread_should_stop()) {
+ ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+ thread_wait_condition, ras_proc,
+ msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+ if (kthread_should_stop())
+ break;
+
+ if (!ras_core->is_initialized)
+ continue;
+
+ atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+ if (ras_core_gpu_in_reset(ras_core))
+ continue;
+
+ if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+ ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+ else
+ ras_process_handle_ras_event(ras_core);
+ }
+
+ return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_proc->fifo_spinlock);
+
+ init_waitqueue_head(&ras_proc->ras_process_wq);
+
+ ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+ (void *)ras_core, "ras_process_thread");
+ if (IS_ERR(ras_proc->ras_process_thread)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+ ret = PTR_ERR(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ras_process_fini(ras_core);
+ return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ if (ras_proc->ras_process_thread) {
+ kthread_stop(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ }
+
+ kfifo_free(&ras_proc->event_fifo);
+
+ return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ atomic_inc(&ras_proc->umc_interrupt_count);
+ atomic_inc(&ras_proc->ras_interrupt_req);
+
+ wake_up(&ras_proc->ras_process_wq);
+ return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = ras_process_put_event(ras_core, req);
+ if (!ret) {
+ atomic_inc(&ras_proc->ras_interrupt_req);
+ wake_up(&ras_proc->ras_process_wq);
+ }
+
+ return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc)
+{
+ int ret;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ if (!ras_core->is_initialized)
+ return -EPERM;
+
+ if (is_umc)
+ ret = ras_process_add_umc_interrupt_req(ras_core, req);
+ else
+ ret = ras_process_add_non_umc_interrupt_req(ras_core, req);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
new file mode 100644
index 000000000000..28458b50510e
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PROCESS_H__
+#define __RAS_PROCESS_H__
+
+struct ras_event_req {
+ uint64_t seqno;
+ uint32_t idx_vf;
+ uint32_t block;
+ uint16_t pasid;
+ uint32_t reset;
+ void *pasid_fn;
+ void *data;
+};
+
+struct ras_process {
+ void *dev;
+ void *ras_process_thread;
+ wait_queue_head_t ras_process_wq;
+ atomic_t ras_interrupt_req;
+ atomic_t umc_interrupt_count;
+ struct kfifo event_fifo;
+ spinlock_t fifo_spinlock;
+};
+
+struct ras_core_context;
+int ras_process_init(struct ras_core_context *ras_core);
+int ras_process_fini(struct ras_core_context *ras_core);
+int ras_process_handle_ras_event(struct ras_core_context *ras_core);
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
new file mode 100644
index 000000000000..ccdb42d2dd60
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_ta_if.h"
+#include "ras_psp.h"
+#include "ras_psp_v13_0.h"
+
+/* position of the instance value in sub_block_index of
+ * ras_ta_trigger_error_input; the sub block uses the lower 12 bits
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+static const struct ras_psp_ip_func *ras_psp_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &ras_psp_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "psp ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct ras_psp_sys_status status = {0};
+ int ret;
+
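+ /* Adopt a preloaded RAS TA session and an external PSP command mutex
+ * if the surrounding driver already provides them.
+ */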
+ if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) {
+ ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status);
+ if (ret)
+ return ret;
+
+ if (status.initialized) {
+ ta_ctx->preload_ras_ta_enabled = true;
+ ta_ctx->ras_ta_initialized = status.initialized;
+ ta_ctx->session_id = status.session_id;
+ }
+
+ psp_ctx->external_mutex = status.psp_cmd_mutex;
+ }
+
+ return 0;
+}
+
+static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ if (psp->sys_func && psp->sys_func->get_ras_ta_init_param)
+ return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config get_ras_ta_init_param API!!\n");
+ return -EACCES;
+}
+
+static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct gpu_mem_block *gpu_mem = NULL;
+ int ret;
+
+ switch (mem_type) {
+ case GPU_MEM_TYPE_RAS_PSP_RING:
+ gpu_mem = &psp->psp_ring.ras_ring_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_CMD:
+ gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_FENCE:
+ gpu_mem = &psp->psp_ctx.out_fence_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_FW:
+ gpu_mem = &psp->ta_ctx.fw_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_CMD:
+ gpu_mem = &psp->ta_ctx.cmd_gpu_mem;
+ break;
+ default:
+ return NULL;
+ }
+
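+ /* Allocate the backing gpu memory on first use; later callers only
+ * take an additional reference.
+ */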
+ if (!gpu_mem->ref_count) {
+ ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem);
+ if (ret)
+ return NULL;
+ gpu_mem->mem_type = mem_type;
+ }
+
+ gpu_mem->ref_count++;
+
+ return gpu_mem;
+}
+
+static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core,
+ struct gpu_mem_block *gpu_mem)
+{
+ if (!gpu_mem)
+ return 0;
+
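+ /* Drop one reference; the memory is returned once the last user is gone. */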
+ gpu_mem->ref_count--;
+
+ if (gpu_mem->ref_count > 0) {
+ return 0;
+ } else if (gpu_mem->ref_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Duplicate free gpu memory %u\n", gpu_mem->mem_type);
+ } else {
+ ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem);
+ memset(gpu_mem, 0, sizeof(*gpu_mem));
+ }
+
+ return 0;
+}
+
+static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_lock(psp_ctx->external_mutex);
+ else
+ mutex_lock(&psp_ctx->internal_mutex);
+}
+
+static void __release_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_unlock(psp_ctx->external_mutex);
+ else
+ mutex_unlock(&psp_ctx->internal_mutex);
+}
+
+static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ uint32_t ras_ring_wptr_dw;
+
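+ /* The PSP ring write pointer is kept in dwords; convert it to a frame slot index. */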
+ ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core);
+
+ return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame));
+}
+
+static int __set_ring_frame_slot(struct ras_core_context *ras_core,
+ uint32_t slot)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ return psp->ip_func->psp_ras_ring_wptr_set(ras_core,
+ (slot * sizeof(struct psp_gfx_rb_frame)) >> 2);
+}
+
+static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core,
+ struct psp_gfx_rb_frame *frame)
+{
+ struct gpu_mem_block *ring_mem;
+ struct psp_gfx_rb_frame *rb_frame;
+ uint32_t max_frame_slot;
+ uint32_t slot_idx;
+ uint32_t write_flush_read_back = 0;
+ int ret = 0;
+
+ ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING);
+ if (!ring_mem)
+ return -ENOMEM;
+
+ max_frame_slot =
+ div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame));
+
+ rb_frame =
+ (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr;
+
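+ /* Wrap back to slot 0 when the write pointer reaches the end of the ring. */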
+ slot_idx = __get_ring_frame_slot(ras_core);
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ memcpy(&rb_frame[slot_idx], frame, sizeof(*frame));
+
+ /* Do a read back to make sure the frame has reached memory
+ * before updating the write pointer.
+ */
+ write_flush_read_back = rb_frame[slot_idx].fence_value;
+ if (write_flush_read_back != frame->fence_value) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to submit ring cmd! cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n",
+ rb_frame[slot_idx].cmd_buf_addr_hi,
+ rb_frame[slot_idx].cmd_buf_addr_lo,
+ rb_frame[slot_idx].fence_addr_hi,
+ rb_frame[slot_idx].fence_addr_lo,
+ write_flush_read_back, frame->fence_value);
+ ret = -EACCES;
+ goto err;
+ }
+
+ slot_idx++;
+
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ __set_ring_frame_slot(ras_core, slot_idx);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, ring_mem);
+ return ret;
+}
+
+static int send_psp_cmd(struct ras_core_context *ras_core,
+ enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data,
+ uint32_t cmd_size, struct psp_cmd_resp *resp)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct gpu_mem_block *psp_cmd_buf = NULL;
+ struct gpu_mem_block *psp_fence_buf = NULL;
+ struct psp_gfx_cmd_resp *gfx_cmd;
+ struct psp_gfx_rb_frame rb_frame;
+ int ret = 0;
+ int timeout = 1000;
+
+ if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id);
+ return -EINVAL;
+ }
+
+ __acquire_psp_cmd_lock(ras_core);
+
+ psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD);
+ if (!psp_cmd_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE);
+ if (!psp_fence_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr;
+ memset(gfx_cmd, 0, sizeof(*gfx_cmd));
+ gfx_cmd->cmd_id = gfx_cmd_id;
+ memcpy(&gfx_cmd->cmd, cmd_data, cmd_size);
+
+ psp_ctx->in_fence_value++;
+
+ memset(&rb_frame, 0, sizeof(rb_frame));
+ rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_value = psp_ctx->in_fence_value;
+
+ ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame);
+ if (ret) {
+ psp_ctx->in_fence_value--;
+ goto exit;
+ }
+
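+ /* Poll the fence buffer until PSP writes back the submitted fence
+ * value, for roughly 2 seconds (1000 iterations of 2 ms sleeps).
+ */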
+ while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) !=
+ psp_ctx->in_fence_value) {
+ if (--timeout == 0)
+ break;
+ /*
+ * Don't wait for the full timeout when err_event_athub occurs,
+ * because the gpu reset thread has been triggered and the lock
+ * resources must be released for the psp resume sequence.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core))
+ break;
+
+ msleep(2);
+ }
+
+ resp->status = gfx_cmd->resp.status;
+ resp->session_id = gfx_cmd->resp.session_id;
+
+exit:
+ ras_psp_put_gpu_mem(ras_core, psp_cmd_buf);
+ ras_psp_put_gpu_mem(ras_core, psp_fence_buf);
+
+ __release_psp_cmd_lock(ras_core);
+
+ return ret;
+}
+
+static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core,
+ struct ras_ta_cmd *ras_cmd)
+{
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n");
+ ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) {
+ RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n");
+ }
+
+ switch (ras_cmd->ras_status) {
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case RAS_TA_STATUS__SUCCESS:
+ break;
+ case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
+}
+
+static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size,
+ void *out, uint32_t out_size)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ras_cmd;
+ struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret = 0;
+
+ if (!in || (in_size > sizeof(union ras_ta_cmd_input)) ||
+ (cmd_id >= MAX_RAS_TA_CMD_ID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id);
+ return -EINVAL;
+ }
+
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem)
+ return -ENOMEM;
+
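+ /* Don't block on the gpu reset lock; bail out if a reset is in progress. */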
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+
+ mutex_lock(&ta_ctx->ta_mutex);
+
+ memset(ras_cmd, 0, sizeof(*ras_cmd));
+ ras_cmd->cmd_id = cmd_id;
+ memcpy(&ras_cmd->ras_in_message, in, in_size);
+
+ invoke_cmd.ta_cmd_id = cmd_id;
+ invoke_cmd.session_id = ta_ctx->session_id;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD,
+ &invoke_cmd, sizeof(invoke_cmd), &resp);
+
+ /* If err_event_athub occurs, the error injection was successful;
+ * however, the return status from the TA is no longer reliable.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS: Failed to send psp cmd! ret:%d, status:%u\n",
+ ret, resp.status);
+ ret = -ESTRPIPE;
+ goto unlock;
+ }
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!ras_cmd->ras_status && out && out_size)
+ memcpy(out, &ras_cmd->ras_out_message, out_size);
+
+ __check_ras_ta_cmd_resp(ras_core, ras_cmd);
+
+unlock:
+ mutex_unlock(&ta_ctx->ta_mutex);
+ ras_core_up_gpu_reset_lock(ras_core);
+out:
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int trigger_ras_ta_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ uint32_t dev_mask = 0;
+
+ switch (info->block_id) {
+ case RAS_TA_BLOCK__GFX:
+ if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type,
+ info->sub_block_index, &info->sub_block_index))
+ return -EINVAL;
+
+ dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask);
+ break;
+ case RAS_TA_BLOCK__SDMA:
+ dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask);
+ break;
+ case RAS_TA_BLOCK__VCN:
+ case RAS_TA_BLOCK__JPEG:
+ dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* pack the instance mask into the upper bits of sub_block_index for backward compatibility */
+ dev_mask <<= RAS_TA_INST_SHIFT;
+ dev_mask &= RAS_TA_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR,
+ info, sizeof(*info), NULL, 0);
+}
+
+static int send_load_ta_fw_cmd(struct ras_core_context *ras_core,
+ struct ras_ta_ctx *ta_ctx)
+{
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ struct gpu_mem_block *fw_mem;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ta_cmd;
+ struct ras_ta_init_flags *ta_init_flags;
+ struct psp_gfx_cmd_load_ta psp_load_ta_cmd;
+ struct psp_cmd_resp resp = {0};
+ struct ras_ta_image_header *fw_hdr = NULL;
+ int ret;
+
+ fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW);
+ if (!fw_mem)
+ return -ENOMEM;
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param);
+ if (ret)
+ goto err;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ /* copy ras ta binary to shared gpu memory */
+ memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size);
+ fw_mem->mem_size = fw_bin->bin_size;
+
+ /* Initialize ras ta startup parameter */
+ ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+ ta_init_flags = &ta_cmd->ras_in_message.init_flags;
+
+ ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en;
+ ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode;
+ ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask;
+ ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num;
+ ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode;
+ ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask;
+
+ /* Setup load ras ta command */
+ memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd));
+ psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_len = fw_mem->mem_size;
+ psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA,
+ &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp);
+ if (!ret && !resp.status) {
+ /* The TA version is read from the image header at FW offset 0x60 */
+ fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr;
+ RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n",
+ (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF,
+ (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF);
+ ta_ctx->ta_version = fw_hdr->image_version;
+ ta_ctx->session_id = resp.session_id;
+ ta_ctx->ras_ta_initialized = true;
+ } else {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status);
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, fw_mem);
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int load_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ int ret;
+
+ fw_bin->bin_addr = ras_ta_load->bin_addr;
+ fw_bin->bin_size = ras_ta_load->bin_size;
+ fw_bin->fw_version = ras_ta_load->fw_version;
+ fw_bin->feature_version = ras_ta_load->feature_version;
+
+ ret = send_load_ta_fw_cmd(ras_core, ta_ctx);
+ if (!ret) {
+ ras_ta_load->out_session_id = ta_ctx->session_id;
+ ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version;
+ }
+
+ return ret;
+}
+
+static int unload_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core))
+ return -EACCES;
+
+ cmd_unload_ta.session_id = ta_ctx->session_id;
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA,
+ &cmd_unload_ta, sizeof(cmd_unload_ta), &resp);
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to unload RAS TA! ret:%d, status:%u\n",
+ ret, resp.status);
+ goto unlock;
+ }
+
+ kfree(ta_ctx->fw_bin.bin_addr);
+ memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin));
+ ta_ctx->ta_version = 0;
+ ta_ctx->ras_ta_initialized = false;
+ ta_ctx->session_id = 0;
+
+unlock:
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return ret;
+}
+
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ta_unload ras_ta_unload = {0};
+ int ret;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if (!ras_ta_load)
+ return -EINVAL;
+
+ if (ta_ctx->ras_ta_initialized) {
+ ras_ta_unload.ras_session_id = ta_ctx->session_id;
+ ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload);
+ if (ret)
+ return ret;
+ }
+
+ return load_ras_ta_firmware(ras_core, ras_ta_load);
+}
+
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if ((!ras_ta_unload) ||
+ (ras_ta_unload->ras_session_id != ta_ctx->session_id))
+ return -EINVAL;
+
+ return unload_ras_ta_firmware(ras_core, ras_ta_unload);
+}
+
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!info)
+ return -EINVAL;
+
+ return trigger_ras_ta_error(ras_core, info, instance_mask);
+}
+
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled &&
+ !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!addr_in || !addr_out)
+ return -EINVAL;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS,
+ addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out));
+}
+
+int ras_psp_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ memset(psp, 0, sizeof(*psp));
+
+ psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn;
+ if (!psp->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&psp->psp_ctx.internal_mutex);
+ mutex_init(&psp->ta_ctx.ta_mutex);
+
+ return 0;
+}
+
+int ras_psp_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ mutex_destroy(&psp->psp_ctx.internal_mutex);
+ mutex_destroy(&psp->ta_ctx.ta_mutex);
+
+ memset(psp, 0, sizeof(*psp));
+
+ return 0;
+}
+
+int ras_psp_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ psp->psp_ip_version = ras_core->config->psp_ip_version;
+
+ psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version);
+ if (!psp->ip_func)
+ return -EINVAL;
+
+ /* After a GPU reset, the system RAS PSP status may have changed;
+ * therefore, synchronize the system status again.
+ */
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ return 0;
+}
+
+int ras_psp_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ bool ret = false;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized)
+ return false;
+
+ switch (cmd_id) {
+ case RAS_TA_CMD_ID__QUERY_ADDRESS:
+ /* Currently, querying the address from RAS TA is only supported
+ * when the RAS TA firmware is loaded during driver installation.
+ */
+ if (ta_ctx->preload_ras_ta_enabled)
+ ret = true;
+ break;
+ case RAS_TA_CMD_ID__TRIGGER_ERROR:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
new file mode 100644
index 000000000000..71776fecfd66
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PSP_H__
+#define __RAS_PSP_H__
+#include "ras.h"
+#include "ras_ta_if.h"
+
+struct ras_core_context;
+struct ras_ta_trigger_error_input;
+struct ras_ta_query_address_input;
+struct ras_ta_query_address_output;
+enum ras_ta_cmd_id;
+
+struct ras_ta_image_header {
+ uint32_t reserved1[24];
+ uint32_t image_version; /* [0x60] Off Chip Firmware Version */
+ uint32_t reserved2[39];
+};
+
+struct ras_psp_sys_status {
+ bool initialized;
+ uint32_t session_id;
+ void *psp_cmd_mutex;
+};
+
+struct ras_ta_init_param {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct gpu_mem_block {
+ uint32_t mem_type;
+ void *mem_bo;
+ uint64_t mem_mc_addr;
+ void *mem_cpu_addr;
+ uint32_t mem_size;
+ int ref_count;
+ void *private;
+};
+
+struct ras_psp_ip_func {
+ uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core);
+ int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr);
+};
+
+struct ras_psp_ring {
+ struct gpu_mem_block ras_ring_gpu_mem;
+};
+
+struct psp_cmd_resp {
+ uint32_t status;
+ uint32_t session_id;
+};
+
+struct ras_psp_ctx {
+ void *external_mutex;
+ struct mutex internal_mutex;
+ uint64_t in_fence_value;
+ struct gpu_mem_block psp_cmd_gpu_mem;
+ struct gpu_mem_block out_fence_gpu_mem;
+};
+
+struct ras_ta_fw_bin {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+};
+
+struct ras_ta_ctx {
+ bool preload_ras_ta_enabled;
+ bool ras_ta_initialized;
+ uint32_t session_id;
+ uint32_t resp_status;
+ uint32_t ta_version;
+ struct mutex ta_mutex;
+ struct ras_ta_fw_bin fw_bin;
+ struct ras_ta_init_param init_param;
+ struct gpu_mem_block fw_gpu_mem;
+ struct gpu_mem_block cmd_gpu_mem;
+};
+
+struct ras_psp {
+ uint32_t psp_ip_version;
+ struct ras_psp_ring psp_ring;
+ struct ras_psp_ctx psp_ctx;
+ struct ras_ta_ctx ta_ctx;
+ const struct ras_psp_ip_func *ip_func;
+ const struct ras_psp_sys_func *sys_func;
+};
+
+struct ras_psp_ta_load {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+ uint64_t out_session_id;
+ uint32_t out_loaded_ta_version;
+};
+
+struct ras_psp_ta_unload {
+ uint64_t ras_session_id;
+};
+
+int ras_psp_sw_init(struct ras_core_context *ras_core);
+int ras_psp_sw_fini(struct ras_core_context *ras_core);
+int ras_psp_hw_init(struct ras_core_context *ras_core);
+int ras_psp_hw_fini(struct ras_core_context *ras_core);
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load);
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload);
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask);
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out);
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
new file mode 100644
index 000000000000..626cf39b75ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_psp_v13_0.h"
+
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+
+static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core)
+{
+ return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67);
+}
+
+static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value)
+{
+ RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value);
+
+ return 0;
+}
+
+const struct ras_psp_ip_func ras_psp_v13_0 = {
+ .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get,
+ .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
new file mode 100644
index 000000000000..b705ffe38a12
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_PSP_V13_0_H__
+#define __RAS_PSP_V13_0_H__
+#include "ras_psp.h"
+
+extern const struct ras_psp_ip_func ras_psp_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
new file mode 100644
index 000000000000..0921e36d3274
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _RAS_TA_IF_H
+#define _RAS_TA_IF_H
+#include "ras.h"
+
+#define RAS_TA_HOST_IF_VER 0
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+/* invalid node instance value */
+#define RAS_TA_INV_NODE 0xffff
+
+/* RAS related enumerations */
+/**********************************************************/
+enum ras_ta_cmd_id {
+ RAS_TA_CMD_ID__ENABLE_FEATURES = 0,
+ RAS_TA_CMD_ID__DISABLE_FEATURES,
+ RAS_TA_CMD_ID__TRIGGER_ERROR,
+ RAS_TA_CMD_ID__QUERY_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_ADDRESS,
+ MAX_RAS_TA_CMD_ID
+};
+
+enum ras_ta_status {
+ RAS_TA_STATUS__SUCCESS = 0x0000,
+ RAS_TA_STATUS__RESET_NEEDED = 0xA001,
+ RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008,
+ RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009,
+ RAS_TA_STATUS__ERROR_GENERIC = 0xA00A,
+ RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019,
+ RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A
+};
+
+enum ras_ta_block {
+ RAS_TA_BLOCK__UMC = 0,
+ RAS_TA_BLOCK__SDMA,
+ RAS_TA_BLOCK__GFX,
+ RAS_TA_BLOCK__MMHUB,
+ RAS_TA_BLOCK__ATHUB,
+ RAS_TA_BLOCK__PCIE_BIF,
+ RAS_TA_BLOCK__HDP,
+ RAS_TA_BLOCK__XGMI_WAFL,
+ RAS_TA_BLOCK__DF,
+ RAS_TA_BLOCK__SMN,
+ RAS_TA_BLOCK__SEM,
+ RAS_TA_BLOCK__MP0,
+ RAS_TA_BLOCK__MP1,
+ RAS_TA_BLOCK__FUSE,
+ RAS_TA_BLOCK__MCA,
+ RAS_TA_BLOCK__VCN,
+ RAS_TA_BLOCK__JPEG,
+ RAS_TA_BLOCK__IH,
+ RAS_TA_BLOCK__MPIO,
+ RAS_TA_BLOCK__MMSCH,
+ RAS_TA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_mca_block {
+ RAS_TA_MCA_BLOCK__MP0 = 0,
+ RAS_TA_MCA_BLOCK__MP1 = 1,
+ RAS_TA_MCA_BLOCK__MPIO = 2,
+ RAS_TA_MCA_BLOCK__IOHC = 3,
+ RAS_TA_MCA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_error_type {
+ RAS_TA_ERROR__NONE = 0,
+ RAS_TA_ERROR__PARITY = 1,
+ RAS_TA_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TA_ERROR__POISON = 8,
+};
+
+enum ras_ta_address_type {
+ RAS_TA_MCA_TO_PA,
+ RAS_TA_PA_TO_MCA,
+};
+
+enum ras_ta_nps_mode {
+ RAS_TA_UNKNOWN_MODE = 0,
+ RAS_TA_NPS1_MODE = 1,
+ RAS_TA_NPS2_MODE = 2,
+ RAS_TA_NPS4_MODE = 4,
+ RAS_TA_NPS8_MODE = 8,
+};
+
+/* Input/output structures for RAS commands */
+/**********************************************************/
+
+struct ras_ta_enable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_disable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_trigger_error_input {
+ /* ras block, e.g. umc, gfx */
+ enum ras_ta_block block_id;
+
+ /* type of error, e.g. single_correctable */
+ enum ras_ta_error_type inject_error_type;
+
+ /* mem block, e.g. hbm, sram, etc. */
+ uint32_t sub_block_index;
+
+ /* explicit address of error */
+ uint64_t address;
+
+ /* method of error injection, e.g. persistent, coherent, etc. */
+ uint64_t value;
+};
+
+struct ras_ta_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct ras_ta_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct ras_ta_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ras_ta_query_address_input {
+ enum ras_ta_address_type addr_type;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+struct ras_ta_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
+struct ras_ta_query_address_output {
+ /* don't use the flags here */
+ struct ras_ta_output_flags flags;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+/* Common input structure for RAS callbacks */
+/**********************************************************/
+union ras_ta_cmd_input {
+ struct ras_ta_init_flags init_flags;
+ struct ras_ta_enable_features_input enable_features;
+ struct ras_ta_disable_features_input disable_features;
+ struct ras_ta_trigger_error_input trigger_error;
+ struct ras_ta_query_address_input address;
+ uint32_t reserve_pad[256];
+};
+
+union ras_ta_cmd_output {
+ struct ras_ta_output_flags flags;
+ struct ras_ta_query_address_output address;
+ uint32_t reserve_pad[256];
+};
+
+struct ras_ta_cmd {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ras_ta_cmd_input ras_in_message;
+ union ras_ta_cmd_output ras_out_message;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
new file mode 100644
index 000000000000..4dae64c424a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_umc_v12_0.h"
+
+#define MAX_ECC_NUM_PER_RETIREMENT 16
+
+/* bad page timestamp format
+ * yy[31:27] mon[26:23] day[22:17] hh[16:12] min[11:6] ss[5:0]
+ */
+#define EEPROM_TIMESTAMP_MINUTE 6
+#define EEPROM_TIMESTAMP_HOUR 12
+#define EEPROM_TIMESTAMP_DAY 17
+#define EEPROM_TIMESTAMP_MONTH 23
+#define EEPROM_TIMESTAMP_YEAR 27
+
+static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core)
+{
+ struct ras_time tm = {0};
+ uint64_t utc_timestamp = 0;
+ uint64_t eeprom_timestamp = 0;
+
+ utc_timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ if (!utc_timestamp)
+ return utc_timestamp;
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm);
+
+ /* the year range is 2000 ~ 2031; clamp the year if it is out of range */
+ if (tm.tm_year < 2000)
+ tm.tm_year = 2000;
+ if (tm.tm_year > 2031)
+ tm.tm_year = 2031;
+
+ tm.tm_year -= 2000;
+
+ eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE)
+ + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR)
+ + (tm.tm_mday << EEPROM_TIMESTAMP_DAY)
+ + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH)
+ + (tm.tm_year << EEPROM_TIMESTAMP_YEAR);
+ eeprom_timestamp &= 0xffffffff;
+
+ return eeprom_timestamp;
+}
+
+static const struct ras_umc_ip_func *ras_umc_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ return &ras_umc_func_v12_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "UMC ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps)
+{
+ struct ras_ta_query_address_input addr_in;
+ struct ras_ta_query_address_output addr_out;
+ int ret;
+
+ if (!in)
+ return -EINVAL;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.ma.err_addr = in->err_addr;
+ addr_in.ma.ch_inst = in->ch_inst;
+ addr_in.ma.umc_inst = in->umc_inst;
+ addr_in.ma.node_inst = in->node_inst;
+ addr_in.ma.socket_id = in->socket_id;
+
+ addr_in.addr_type = RAS_TA_MCA_TO_PA;
+
+ ret = ras_psp_query_address(ras_core, &addr_in, &addr_out);
+ if (ret) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to query RAS physical address for 0x%llx, ret:%d",
+ in->err_addr, ret);
+ return -EREMOTEIO;
+ }
+
+ if (out) {
+ out->pa = addr_out.pa.pa;
+ out->bank = addr_out.pa.bank;
+ out->channel_idx = addr_out.pa.channel_idx;
+ }
+
+ return 0;
+}
+
+static int ras_umc_log_ecc(struct ras_core_context *ras_core,
+ unsigned long idx, void *data)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret;
+
+ mutex_lock(&ras_umc->tree_lock);
+ ret = radix_tree_insert(&ras_umc->root, idx, data);
+ if (!ret)
+ radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return ret;
+}
+
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t buf[8] = {0};
+ void **slot;
+ void *data;
+ void *iter = buf;
+
+ mutex_lock(&ras_umc->tree_lock);
+ radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) {
+ data = ras_radix_tree_delete_iter(&ras_umc->root, iter);
+ kfree(data);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return 0;
+}
+
+static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t page_pfn[16];
+ int count = 0, i;
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) {
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Fail to convert error address! count:%d\n", count);
+ return;
+ }
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]);
+}
+
+/* While a gpu reset is ongoing, ecc logging operations are queued as pending.
+ */
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node;
+
+ ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL);
+ if (!ecc_node)
+ return -ENOMEM;
+
+ memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc));
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list);
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+/* After gpu reset is complete, re-log the pending error banks.
+ */
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ if (!ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record umc_rec;
+ struct eeprom_umc_record *err_rec;
+ int ret;
+
+ memset(&umc_rec, 0, sizeof(umc_rec));
+
+ mutex_lock(&ras_umc->bank_log_lock);
+ ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec);
+ if (ret)
+ goto out;
+
+ err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL);
+ if (!err_rec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(err_rec, &umc_rec, sizeof(umc_rec));
+ ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec);
+ if (ret) {
+ if (ret == -EEXIST) {
+ RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n");
+ ret = 0;
+ }
+
+ kfree(err_rec);
+ goto out;
+ }
+
+ ras_umc_reserve_eeprom_record(ras_core, err_rec);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL);
+
+out:
+ mutex_unlock(&ras_umc->bank_log_lock);
+ return ret;
+}
+
+static int ras_umc_get_new_records(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, u32 num)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? num : MAX_ECC_NUM_PER_RETIREMENT;
+ int count = 0;
+ int new_detected, i;
+
+ mutex_lock(&ras_umc->tree_lock);
+ new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries,
+ 0, entry_num, UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ memcpy(&records[count], entries[i], sizeof(struct eeprom_umc_record));
+ count++;
+ radix_tree_tag_clear(&ras_umc->root,
+ entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return count;
+}
+
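+/* For records loaded from eeprom, convert them to the current NPS mode and
+ * report them as not yet retired; for newly detected records, check whether
+ * they already exist in the saved eeprom data.
+ */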
+static bool ras_umc_check_retired_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t nps = 0;
+ int i, ret;
+
+ if (from_eeprom) {
+ nps = ras_umc->umc_err_data.umc_nps_mode;
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) {
+ ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps);
+ if (ret)
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to adjust eeprom record, ret:%d", ret);
+ }
+ return false;
+ }
+
+ for (i = 0; i < data->count; i++) {
+ if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) &&
+ (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn))
+ return true;
+ }
+
+ return false;
+}
+
+/* alloc/realloc bps array */
+static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core,
+ struct eeprom_store_record *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kcalloc(align_space, sizeof(*data->bps), GFP_KERNEL);
+
+ if (!bps)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(bps, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = bps;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn[16];
+ int count = 0, j;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages)
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+
+ if (count > 0) {
+ for (j = 0; j < count; j++) {
+ bps->cur_nps_retired_row_pfn = page_pfn[j];
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+ } else {
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+/* It deals with VRAM only. */
+static int ras_umc_add_bad_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps,
+ int pages, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *data = &ras_umc->umc_err_data;
+ int i, ret = 0;
+
+ if (!bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < pages; i++) {
+ if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom))
+ continue;
+
+ ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+
+ if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn)
+ continue;
+
+ data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn;
+
+ if (from_eeprom)
+ ras_umc_reserve_eeprom_record(ras_core, &bps[i]);
+
+ ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+ }
+out:
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+/*
+ * Read the error record array from eeprom, add the records to the bad
+ * page data and reserve the corresponding memory pages.
+ */
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core)
+{
+ struct eeprom_umc_record *bps;
+ uint32_t ras_num_recs;
+ int ret;
+
+ ras_num_recs = ras_eeprom_get_record_count(ras_core);
+ /* no bad page record, skip eeprom access */
+ if (!ras_num_recs ||
+ ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE)
+ return 0;
+
+ bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ ret = ras_eeprom_read(ras_core, bps, ras_num_recs);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!");
+ } else {
+ ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN;
+ ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true);
+ }
+
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * Write the error record array to eeprom; the function should be
+ * protected by recovery_lock. Only records that are not yet in eeprom
+ * are appended.
+ */
+static int ras_umc_save_bad_pages(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t eeprom_record_num;
+ int save_count;
+ int ret = 0;
+
+ if (!data->bps)
+ return 0;
+
+ eeprom_record_num = ras_eeprom_get_record_count(ras_core);
+ mutex_lock(&ras_umc->umc_lock);
+ save_count = data->count - eeprom_record_num;
+ /* only new entries are saved */
+ if (save_count > 0) {
+ if (ras_eeprom_append(ras_core,
+ &data->bps[eeprom_record_num],
+ save_count)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!");
+ ret = -EIO;
+ goto exit;
+ }
+
+ RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
+exit:
+ mutex_unlock(&ras_umc->umc_lock);
+ return ret;
+}
+
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data)
+{
+ struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT];
+ int count, ret;
+
+ memset(records, 0, sizeof(records));
+ count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records));
+ if (count <= 0)
+ return -ENODATA;
+
+ ret = ras_umc_add_bad_pages(ras_core, records, count, false);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n");
+ return -EINVAL;
+ }
+
+ ret = ras_umc_save_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ras_umc_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+
+ memset(ras_umc, 0, sizeof(*ras_umc));
+
+ INIT_LIST_HEAD(&ras_umc->pending_ecc_list);
+
+ INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL);
+
+ mutex_init(&ras_umc->tree_lock);
+ mutex_init(&ras_umc->pending_ecc_lock);
+ mutex_init(&ras_umc->umc_lock);
+ mutex_init(&ras_umc->bank_log_lock);
+
+ return 0;
+}
+
+int ras_umc_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_destroy(&ras_umc->umc_lock);
+ mutex_destroy(&ras_umc->bank_log_lock);
+
+ if (umc_err_data->rom_data.bps) {
+ umc_err_data->rom_data.count = 0;
+ kfree(umc_err_data->rom_data.bps);
+ umc_err_data->rom_data.bps = NULL;
+ umc_err_data->rom_data.space_left = 0;
+ }
+
+ if (umc_err_data->ram_data.bps) {
+ umc_err_data->ram_data.count = 0;
+ kfree(umc_err_data->ram_data.bps);
+ umc_err_data->ram_data.bps = NULL;
+ umc_err_data->ram_data.space_left = 0;
+ }
+
+ ras_umc_clear_logged_ecc(ras_core);
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ mutex_destroy(&ras_umc->tree_lock);
+ mutex_destroy(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint32_t nps;
+
+ nps = ras_core_get_curr_nps_mode(ras_core);
+
+ if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_err_data.umc_nps_mode = nps;
+
+ ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type;
+ if (!ras_umc->umc_vram_type) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n",
+ ras_umc->umc_vram_type);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_ip_version = ras_core->config->umc_ip_version;
+ ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version);
+ if (!ras_umc->ip_func)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ras_umc_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data;
+
+ mutex_lock(&ras_core->ras_umc.umc_lock);
+
+ kfree(data->rom_data.bps);
+ kfree(data->ram_data.bps);
+
+ memset(data, 0, sizeof(*data));
+ mutex_unlock(&ras_core->ras_umc.umc_lock);
+
+ return 0;
+}
+
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record)
+{
+ struct eeprom_umc_record *err_rec = record;
+
+ /* Set bad page pfn and nps mode */
+ EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec,
+ RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps);
+
+ err_rec->address = err_addr;
+ err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core);
+ err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = cur_nps_addr->channel_idx;
+ err_rec->mcumc_id = umc_inst;
+ err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa);
+ err_rec->cur_nps_bank = cur_nps_addr->bank;
+ err_rec->cur_nps = cur_nps;
+ return 0;
+}
+
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data;
+
+ return err_data->rom_data.count;
+}
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ return data->count;
+}
+
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ if (index >= data->count)
+ return -EINVAL;
+
+ memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record));
+ return 0;
+}
+
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn = RAS_ADDR_TO_PFN(addr);
+ int i, ret = false;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < data->count; i++) {
+ if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret = 0;
+
+ if (bank_to_pa)
+ ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa);
+ else
+ ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
new file mode 100644
index 000000000000..7d9e779d8c4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_UMC_H__
+#define __RAS_UMC_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_cmd.h"
+
+#define UMC_VRAM_TYPE_UNKNOWN 0
+#define UMC_VRAM_TYPE_GDDR1 1
+#define UMC_VRAM_TYPE_DDR2 2
+#define UMC_VRAM_TYPE_GDDR3 3
+#define UMC_VRAM_TYPE_GDDR4 4
+#define UMC_VRAM_TYPE_GDDR5 5
+#define UMC_VRAM_TYPE_HBM 6
+#define UMC_VRAM_TYPE_DDR3 7
+#define UMC_VRAM_TYPE_DDR4 8
+#define UMC_VRAM_TYPE_GDDR6 9
+#define UMC_VRAM_TYPE_DDR5 10
+#define UMC_VRAM_TYPE_LPDDR4 11
+#define UMC_VRAM_TYPE_LPDDR5 12
+#define UMC_VRAM_TYPE_HBM3E 13
+
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
+#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF)
+
+/* three column bits and one row bit in the MCA address are flipped
+ * during bad page retirement
+ */
+#define UMC_PA_FLIP_BITS_NUM 4
+
+enum umc_memory_partition_mode {
+ UMC_MEMORY_PARTITION_MODE_NONE = 0,
+ UMC_MEMORY_PARTITION_MODE_NPS1 = 1,
+ UMC_MEMORY_PARTITION_MODE_NPS2 = 2,
+ UMC_MEMORY_PARTITION_MODE_NPS3 = 3,
+ UMC_MEMORY_PARTITION_MODE_NPS4 = 4,
+ UMC_MEMORY_PARTITION_MODE_NPS6 = 6,
+ UMC_MEMORY_PARTITION_MODE_NPS8 = 8,
+ UMC_MEMORY_PARTITION_MODE_UNKNOWN
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+
+struct umc_flip_bits {
+ uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
+struct umc_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct umc_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct umc_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+};
+
+struct ras_umc_ip_func {
+ int (*bank_to_eeprom_record)(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record);
+ int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps);
+ int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num);
+ int (*bank_to_soc_pa)(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr, uint64_t *soc_pa);
+ int (*soc_pa_to_bank)(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct umc_bank_addr *bank_addr);
+};
+
+struct eeprom_store_record {
+ /* point to data records array */
+ struct eeprom_umc_record *bps;
+ /* the count of entries */
+ int count;
+ /* the space can place new entries */
+ int space_left;
+};
+
+struct ras_umc_err_data {
+ struct eeprom_store_record rom_data;
+ struct eeprom_store_record ram_data;
+ enum umc_memory_partition_mode umc_nps_mode;
+ uint64_t last_retired_pfn;
+};
+
+struct ras_umc {
+ u32 umc_ip_version;
+ u32 umc_vram_type;
+ const struct ras_umc_ip_func *ip_func;
+ struct radix_tree_root root;
+ struct mutex tree_lock;
+ struct mutex umc_lock;
+ struct mutex bank_log_lock;
+ struct mutex pending_ecc_lock;
+ struct ras_umc_err_data umc_err_data;
+ struct list_head pending_ecc_list;
+};
+
+int ras_umc_sw_init(struct ras_core_context *ras);
+int ras_umc_sw_fini(struct ras_core_context *ras);
+int ras_umc_hw_init(struct ras_core_context *ras);
+int ras_umc_hw_fini(struct ras_core_context *ras);
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps);
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data);
+int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank);
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank);
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core);
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core);
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core);
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core);
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core);
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record);
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr);
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
new file mode 100644
index 000000000000..5d9a11c17a86
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_core_status.h"
+#include "ras_umc_v12_0.h"
+
+#define NumDieInterleaved 4
+
+static const uint32_t umc_v12_0_channel_idx_tbl[]
+ [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
+ {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
+ {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
+ {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
+ {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
+ {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
+ {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
+ {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
+ {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
+};
+
+/* mapping of MCA error address to normalized address */
+static const uint32_t umc_v12_0_ma2na_mapping[] = {
+ 0, 5, 6, 8, 9, 14, 12, 13,
+ 10, 11, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 24, 7, 29, 30,
+};
+
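+/* Return the parity (XOR of all 32 bits) of val */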
+static bool umc_v12_0_bit_wise_xor(uint32_t val)
+{
+ bool result = false;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ result = result ^ ((val >> i) & 0x1);
+
+ return result;
+}
+
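+/* Select the physical address bits that are flipped for page retirement,
+ * based on the memory partition (NPS) mode and the VRAM type.
+ */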
+static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core,
+ enum umc_memory_partition_mode nps,
+ struct umc_flip_bits *flip_bits)
+{
+ uint32_t vram_type = ras_core->ras_umc.umc_vram_type;
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case UMC_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case UMC_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+}
+
+static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core,
+ uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok)
+{
+ struct umc_flip_bits flip_bits = {0};
+ uint64_t row_pa;
+ int i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = pa;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < flip_bits.bit_num; i++)
+ row_pa &= ~BIT_ULL(flip_bits.flip_bits_in_pa[i]);
+
+ if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa))
+ row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]);
+
+ return row_pa;
+}
+
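+/* Enumerate all bad pages that share a retired row by looping over every
+ * combination of the flip bits; optionally dump them and fill the pfns array.
+ */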
+static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num,
+ uint64_t seq_no, bool dump)
+{
+ uint32_t col, col_lower, row, row_lower, idx, row_high;
+ uint64_t soc_pa, row_pa, column, err_addr;
+ uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn);
+ struct umc_flip_bits flip_bits = {0};
+ uint32_t retire_unit;
+ uint32_t i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true);
+
+ err_addr = record->address;
+ /* get column bit 0 and 1 in mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(flip_bits.flip_row_bit);
+
+ if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) {
+ row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL;
+ /* it's 2.25GB in each channel; when converting from MCA address to PA,
+ * [R14 R13] is only converted if the two bits' value is 0x3, so get
+ * them from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
+
+ idx = 0;
+ row = 0;
+ retire_unit = 0x1 << flip_bits.bit_num;
+ /* loop for all possibilities of retire bits */
+ for (column = 0; column < retire_unit; column++) {
+ soc_pa = row_pa;
+ for (i = 0; i < flip_bits.bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+
+ /* add row bit 13 */
+ if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM)
+ row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower;
+
+ if (dump)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ seq_no, soc_pa, row, col,
+ record->cur_nps_bank, record->mem_channel);
+
+ if (pfns && (idx < num))
+ pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa);
+ }
+
+ return idx;
+}
+
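+/* Software fallback that converts an MCA error address to a SOC physical
+ * address: apply the bank hash, map the MCA bits to the normalized address,
+ * then add the channel hash and the NPS section offset.
+ */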
+static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ uint32_t i, na_shift;
+ uint64_t soc_pa, na, na_nps;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
+ uint32_t bank0, bank1, bank2, bank3, bank;
+ uint32_t ch_inst = addr_in->ch_inst;
+ uint32_t umc_inst = addr_in->umc_inst;
+ uint32_t node_inst = addr_in->node_inst;
+ uint32_t socket_id = addr_in->socket_id;
+ uint32_t channel_index;
+ uint64_t err_addr = addr_in->err_addr;
+
+ if (node_inst != UMC_INV_AID_NODE) {
+ if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM ||
+ umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM ||
+ node_inst >= UMC_V12_0_AID_NUM_MAX ||
+ socket_id >= UMC_V12_0_SOCKET_NUM_MAX)
+ return -EINVAL;
+ } else {
+ if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX ||
+ ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM)
+ return -EINVAL;
+ }
+
+ bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
+ bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
+ bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
+ bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+
+ /* apply bank hash algorithm */
+ bank0 =
+ bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
+ bank1 =
+ bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
+ bank2 =
+ bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
+ bank3 =
+ bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
+
+ bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
+ err_addr &= ~0x3c0ULL;
+ err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
+
+ na_nps = 0x0;
+ /* convert mca error address to normalized address */
+ for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
+ na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS1)
+ na_shift = 8;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ na_shift = 9;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ na_shift = 10;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8)
+ na_shift = 11;
+ else
+ return -EINVAL;
+
+ na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff);
+
+ if (node_inst != UMC_INV_AID_NODE)
+ channel_index =
+ umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst];
+ else {
+ channel_index = ch_inst;
+ node_inst = channel_index /
+ (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM);
+ }
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* calc channel hash based on absolute address */
+ soc_pa += socket_id * SOCKET_LFB_SIZE;
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ /* restore pa */
+ soc_pa -= socket_id * SOCKET_LFB_SIZE;
+
+ /* get some channel bits from na_nps directly and
+ * add nps section offset
+ */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT);
+ soc_pa |= ((na_nps & 0x100) << 5);
+ soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x300) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) {
+ soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x700) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) +
+ (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3);
+ }
+
+ addr_out->pa = soc_pa;
+ addr_out->bank = bank;
+ addr_out->channel_idx = channel_index;
+
+ return 0;
+}
+
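+/* Prefer the RAS TA (PSP) address translation when it is supported,
+ * otherwise fall back to the software conversion above.
+ */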
+static int convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ int ret;
+
+ if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS))
+ ret = ras_umc_psp_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+ else
+ ret = umc_v12_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+
+ return ret;
+}
+
+static int convert_bank_to_nps_addr(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps)
+{
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr);
+ addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid);
+ addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid);
+ addr_in.node_inst = ACA_IPID_2_DIE_ID(bank->ipid);
+ addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid);
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (!ret) {
+ pa_addr->pa =
+ convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+ pa_addr->channel_idx = addr_out.channel_idx;
+ pa_addr->bank = addr_out.bank;
+ }
+
+ return ret;
+}
+
+static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record)
+{
+ struct umc_phy_addr nps_addr;
+ int ret;
+
+ memset(&nps_addr, 0, sizeof(nps_addr));
+
+ ret = convert_bank_to_nps_addr(ras_core, bank,
+ &nps_addr, bank->nps);
+ if (ret)
+ return ret;
+
+ ras_umc_fill_eeprom_record(ras_core,
+ ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid),
+ &nps_addr, bank->nps, record);
+
+ lookup_bad_pages_in_a_row(ras_core, record,
+ bank->nps, NULL, 0, bank->seq_no, true);
+
+ return 0;
+}
+
+static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps)
+{
+ struct device_system_info dev_info = {0};
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ addr_in.err_addr = record->address;
+ addr_in.ch_inst = record->mem_channel;
+ addr_in.umc_inst = record->mcumc_id;
+ addr_in.node_inst = UMC_INV_AID_NODE;
+ addr_in.socket_id = dev_info.socket_id;
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (ret)
+ return ret;
+
+ *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+
+ return 0;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps)
+{
+ uint64_t pa = 0;
+ int ret = 0;
+
+ if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) {
+ record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record);
+ } else {
+ ret = convert_eeprom_record_to_nps_addr(ras_core,
+ record, &pa, nps);
+ if (!ret)
+ record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa);
+ }
+
+ record->cur_nps = nps;
+
+ return ret;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num)
+{
+ return lookup_bad_pages_in_a_row(ras_core,
+ record, nps, pfns, num, 0, false);
+}
+
+static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa,
+ struct umc_bank_addr *bank_addr)
+{
+ int channel_hashed = 0;
+ int channel_real = 0;
+ int channel_reversed = 0;
+ int i = 0;
+
+ bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa);
+ bank_addr->bank_group = 0; /* This is a combination of SID & Bank. Needed?? */
+ bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa);
+ bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa);
+ bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa);
+
+ /* Channel bits 4-6 are hashed. Bruteforce reverse the hash */
+ channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7;
+
+ for (i = 0; i < 8; i++) {
+ channel_reversed = 0;
+ channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2);
+ if (channel_reversed == channel_hashed)
+ channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf);
+ }
+
+ bank_addr->channel = channel_real;
+ bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa);
+
+ return 0;
+}
+
+static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr,
+ uint64_t *soc_pa)
+{
+ uint64_t na = 0;
+ uint64_t tmp_pa = 0;
+
+ *soc_pa = 0;
+
+ tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id);
+ tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank);
+ tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row);
+ tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column);
+ tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel);
+ tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel);
+
+ /* Get the NA */
+ na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT);
+ na |= tmp_pa & 0xff;
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ tmp_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(bank_addr.channel) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa);
+
+ *soc_pa = tmp_pa;
+
+ return 0;
+}
+
+const struct ras_umc_ip_func ras_umc_func_v12_0 = {
+ .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record,
+ .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record,
+ .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages,
+ .bank_to_soc_pa = umc_12_0_bank_to_soc_pa,
+ .soc_pa_to_bank = umc_12_0_soc_pa_to_bank,
+};
+
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
new file mode 100644
index 000000000000..8a35ad856165
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_UMC_V12_0_H__
+#define __RAS_UMC_V12_0_H__
+#include "ras.h"
+
+/* MCA_UMC_UMC0_MCUMC_ADDRT0 */
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L
+
+/* MCMP1_IPIDT0 */
+#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0
+#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20
+#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c
+#define MCMP1_IPIDT0__McaType__SHIFT 0x30
+
+#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL
+#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L
+#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L
+#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L
+
+/* number of umc channel instance with memory map register access */
+#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8
+/* number of umc instance with memory map register access */
+#define UMC_V12_0_UMC_INSTANCE_NUM 4
+
+/* one piece of normalized address is mapped to 8 pieces of physical address */
+#define UMC_V12_0_NA_MAP_PA_NUM 8
+
+/* bank bits in MCA error address */
+#define UMC_V12_0_MCA_B0_BIT 6
+#define UMC_V12_0_MCA_B1_BIT 7
+#define UMC_V12_0_MCA_B2_BIT 8
+#define UMC_V12_0_MCA_B3_BIT 9
+
+/* row bits in MCA address */
+#define UMC_V12_0_MCA_R0_BIT 10
+
+/* Stack ID bits in SOC physical address */
+#define UMC_V12_0_PA_SID1_BIT 37
+#define UMC_V12_0_PA_SID0_BIT 36
+
+/* bank bits in SOC physical address */
+#define UMC_V12_0_PA_B3_BIT 18
+#define UMC_V12_0_PA_B2_BIT 17
+#define UMC_V12_0_PA_B1_BIT 20
+#define UMC_V12_0_PA_B0_BIT 19
+
+/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R13_BIT 35
+#define UMC_V12_0_PA_R12_BIT 34
+#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R10_BIT 32
+#define UMC_V12_0_PA_R9_BIT 31
+#define UMC_V12_0_PA_R8_BIT 30
+#define UMC_V12_0_PA_R7_BIT 29
+#define UMC_V12_0_PA_R6_BIT 28
+#define UMC_V12_0_PA_R5_BIT 27
+#define UMC_V12_0_PA_R4_BIT 26
+#define UMC_V12_0_PA_R3_BIT 25
+#define UMC_V12_0_PA_R2_BIT 24
+#define UMC_V12_0_PA_R1_BIT 23
+#define UMC_V12_0_PA_R0_BIT 22
+
+/* column bits in SOC physical address */
+#define UMC_V12_0_PA_C4_BIT 21
+#define UMC_V12_0_PA_C3_BIT 16
+#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C1_BIT 6
+#define UMC_V12_0_PA_C0_BIT 5
+
+/* channel index bits in SOC physical address */
+#define UMC_V12_0_PA_CH6_BIT 14
+#define UMC_V12_0_PA_CH5_BIT 13
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH3_BIT 11
+#define UMC_V12_0_PA_CH2_BIT 10
+#define UMC_V12_0_PA_CH1_BIT 9
+#define UMC_V12_0_PA_CH0_BIT 8
+
+/* Pseudochannel index bits in SOC physical address */
+#define UMC_V12_0_PA_PC0_BIT 7
+
+#define UMC_V12_0_NA_C2_BIT 8
+
+#define UMC_V12_0_SOC_PA_TO_SID(pa) \
+ ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL))
+
+#define UMC_V12_0_SOC_PA_TO_BANK(pa) \
+ ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL))
+
+#define UMC_V12_0_SOC_PA_TO_ROW(pa) \
+ ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \
+ (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \
+ (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \
+ (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \
+ (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \
+ (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \
+ (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \
+ (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL))
+
+#define UMC_V12_0_SOC_PA_TO_COL(pa) \
+ ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL))
+
+#define UMC_V12_0_SOC_PA_TO_CH(pa) \
+ ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL))
+
+#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL)
+
+#define UMC_V12_0_SOC_SID_TO_PA(sid) \
+ ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \
+ (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT))
+
+#define UMC_V12_0_SOC_BANK_TO_PA(bank) \
+ ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \
+ (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \
+ (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \
+ (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT))
+
+#define UMC_V12_0_SOC_ROW_TO_PA(row) \
+ ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \
+ (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \
+ (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \
+ (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \
+ (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \
+ (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \
+ (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \
+ (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \
+ (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \
+ (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \
+ (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \
+ (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \
+ (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \
+ (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT))
+
+#define UMC_V12_0_SOC_COL_TO_PA(col) \
+ ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \
+ (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \
+ (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \
+ (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \
+ (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT))
+
+#define UMC_V12_0_SOC_CH_TO_PA(ch) \
+ ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \
+ (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \
+ (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \
+ (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \
+ (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \
+ (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \
+ (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT))
+
+#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT)
+
+/* bank hash settings */
+#define UMC_V12_0_XOR_EN0 1
+#define UMC_V12_0_XOR_EN1 1
+#define UMC_V12_0_XOR_EN2 1
+#define UMC_V12_0_XOR_EN3 1
+#define UMC_V12_0_COL_XOR0 0x0
+#define UMC_V12_0_COL_XOR1 0x0
+#define UMC_V12_0_COL_XOR2 0x800
+#define UMC_V12_0_COL_XOR3 0x1000
+#define UMC_V12_0_ROW_XOR0 0x11111
+#define UMC_V12_0_ROW_XOR1 0x22222
+#define UMC_V12_0_ROW_XOR2 0x4444
+#define UMC_V12_0_ROW_XOR3 0x8888
+
+/* channel hash settings */
+#define UMC_V12_0_HASH_4K 0
+#define UMC_V12_0_HASH_64K 1
+#define UMC_V12_0_HASH_2M 1
+#define UMC_V12_0_HASH_1G 1
+#define UMC_V12_0_HASH_1T 1
+
+/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA),
+ * hash bit is only effective when related setting is enabled
+ */
+#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
+ (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
+ (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
+ (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
+ (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
+ } while (0)
+
+
+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * is the index of 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * is the index of 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768, the higher 26 bits in ErrorAddr
+ * is the index of 32KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
+#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3)
+#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \
+ (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF))
+#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+#define ACA_IPID_2_UMC_CH(ipid) \
+ (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_UMC_INST(ipid) \
+ (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
+#define ACA_ADDR_2_ERR_ADDR(addr) \
+ REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr)
+
+/* R13 bit shift should be considered, double the number */
+#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+
+
+/* C2, C3, C4 and R13: the four MCA address bits looped over in page retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
+
+/* invalid node instance value */
+#define UMC_INV_AID_NODE 0xffff
+
+#define UMC_V12_0_AID_NUM_MAX 4
+#define UMC_V12_0_SOCKET_NUM_MAX 8
+
+#define UMC_V12_0_TOTAL_CHANNEL_NUM \
+ (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM)
+
+/* one device has 192GB HBM */
+#define SOCKET_LFB_SIZE 0x3000000000ULL
+
+extern const struct ras_umc_ip_func ras_umc_func_v12_0;
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+#endif
+
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index c901ac00c0c3..ed3ed617c688 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,6 +9,7 @@ config DRM_HDLCD
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
+ select DRM_BRIDGE # for TDA998x
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 2ad33559a33a..5a66948ffd24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
static int
komeda_crtc_prepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
@@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
- DRM_ERROR("failed to set aclk.\n");
+ drm_err(drm, "failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
if (err)
- DRM_ERROR("failed to enable aclk.\n");
+ drm_err(drm, "failed to enable aclk.\n");
}
err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
- DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
if (err)
- DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+ drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id);
unlock:
mutex_unlock(&mdev->lock);
@@ -164,6 +165,7 @@ unlock:
static int
komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
u32 new_mode;
@@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -200,6 +202,7 @@ unlock:
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct drm_crtc *crtc = &kcrtc->base;
u32 events = evts->pipes[kcrtc->master->id];
@@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
if (wb_conn)
drm_writeback_signal_completion(&wb_conn->base, 0);
else
- DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n",
+ drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n",
drm_crtc_index(&kcrtc->base));
}
/* will handle it together with the write back support */
@@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
- DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
+ drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
/* wait the flip take affect.*/
if (wait_for_completion_timeout(flip_done, HZ) == 0) {
- DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id);
if (!input_flip_done) {
unsigned long flags;
@@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = {
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
+ struct drm_device *drm = &kms->base;
struct komeda_crtc *crtc;
struct komeda_pipeline *master;
char str[16];
@@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
else
sprintf(str, "None");
- DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
+ drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n",
kms->n_crtcs, master->id, str);
kms->n_crtcs++;
@@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev,
struct komeda_pipeline *pipe,
struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct drm_bridge *bridge;
int err;
@@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev,
err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err)
- dev_err(dev, "bridge_attach() failed for pipe: %s\n",
+ drm_err(drm, "bridge_attach() failed for pipe: %s\n",
of_node_full_name(pipe->of_node));
return err;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index df5da5a44755..3ca461eb0a24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -9,6 +9,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "komeda_framebuffer.h"
#include "komeda_dev.h"
@@ -157,6 +158,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct komeda_dev *mdev = dev->dev_private;
@@ -177,7 +179,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EINVAL);
}
- drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index c61ca98a3a63..02b2b8ae482a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -37,6 +37,7 @@ struct komeda_fb {
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..875cdbff18c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -88,7 +88,7 @@ komeda_wb_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
komeda_wb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 3cfefadc7c9d..4b4a08cb396d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -11,8 +11,8 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/pixel_format.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
@@ -22,6 +22,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -73,7 +74,17 @@ static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.disable_vblank = hdlcd_crtc_disable_vblank,
};
-static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+static const struct {
+ u32 fourcc;
+ struct pixel_format pixel;
+} supported_formats[] = {
+ { DRM_FORMAT_RGB565, PIXEL_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB1555, PIXEL_FORMAT_XRGB1555 },
+ { DRM_FORMAT_RGB888, PIXEL_FORMAT_RGB888 },
+ { DRM_FORMAT_XRGB8888, PIXEL_FORMAT_XRGB8888 },
+ { DRM_FORMAT_XBGR8888, PIXEL_FORMAT_XBGR8888 },
+ { DRM_FORMAT_XRGB2101010, PIXEL_FORMAT_XRGB2101010},
+};
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
@@ -83,15 +94,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format;
- struct simplefb_format *format = NULL;
+ const struct pixel_format *format = NULL;
int i;
- pixel_format = fb->format->format;
-
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ if (supported_formats[i].fourcc == fb->format->format)
+ format = &supported_formats[i].pixel;
}
if (WARN_ON(!format))
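
For illustration, the lookup above could be factored as the following hypothetical helper; hdlcd_lookup_pixel_format() is not part of the patch:

/* Sketch only: map a DRM fourcc to the pixel_format entry of the
 * supported_formats[] table introduced above; returns NULL when the
 * format is not handled by the HDLCD. */
static const struct pixel_format *hdlcd_lookup_pixel_format(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
		if (supported_formats[i].fourcc == fourcc)
			return &supported_formats[i].pixel;
	}

	return NULL;
}
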
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index c3179d74f3f5..81d45f2dd6a7 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -33,6 +33,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index e083021e9e99..b765f6c9eea4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -29,6 +29,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -306,10 +307,10 @@ malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int n_superblocks = 0;
- const struct drm_format_info *info;
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
@@ -325,8 +326,6 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
return false;
}
- info = drm_get_format_info(dev, mode_cmd);
-
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
@@ -366,24 +365,26 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
- return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
+ return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd);
return false;
}
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->modifier[0]) {
- if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
+ if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd))
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file, mode_cmd);
+ return drm_gem_fb_create(dev, file, info, mode_cmd);
}
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
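
The signature change above reflects the updated .fb_create callback shape used throughout this series: the core resolves the drm_format_info and hands it to the driver. A minimal sketch, with "foo" as a hypothetical driver:

/* Sketch only: drivers receive the pre-resolved format info instead of
 * calling drm_get_format_info() themselves, and pass it on to the GEM
 * framebuffer helper. */
static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
	      const struct drm_format_info *info,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_gem_fb_create(dev, file, info, mode_cmd);
}
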
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2577f0cef8fc..47733c85d271 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
@@ -43,7 +44,7 @@ static int malidp_mw_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
malidp_mw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 34547edf1ee3..f1a5014bcfa1 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -159,7 +159,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
}
if (!fourcc_mod_is_vendor(modifier, ARM)) {
- DRM_ERROR("Unknown modifier (not Arm)\n");
+ DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
return false;
}
@@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
struct malidp_crtc_state *mc;
u32 src_w, src_h;
int ret;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 0900e4466ffb..033b19b31f63 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index a763349dd89f..2445365c823f 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -12,6 +12,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index cf2e88218dc0..77098928f821 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -6,6 +6,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "armada_drm.h"
#include "armada_fb.h"
@@ -18,7 +19,9 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = {
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode,
+ struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
@@ -64,7 +67,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->mod = config;
dfb->fb.obj[0] = &obj->obj;
- drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
+ drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
@@ -84,9 +87,9 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
}
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
@@ -122,7 +125,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err_unref;
}
- dfb = armada_framebuffer_create(dev, mode, obj);
+ dfb = armada_framebuffer_create(dev, info, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index c5bc53d7e0c4..f2b990f055a2 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -17,7 +17,9 @@ struct armada_framebuffer {
#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 6ee7ce04ee71..8bbae94804f8 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -28,8 +29,6 @@ static void armada_fbdev_fb_destroy(struct fb_info *info)
fbh->fb->funcs->destroy(fbh->fb);
drm_client_release(&fbh->client);
- drm_fb_helper_unprepare(fbh);
- kfree(fbh);
}
static const struct fb_ops armada_fb_ops = {
@@ -45,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
+ struct fb_info *info = fbh->info;
struct drm_mode_fb_cmd2 mode;
struct armada_framebuffer *dfb;
struct armada_gem_object *obj;
- struct fb_info *info;
int size, ret;
void *ptr;
@@ -78,7 +77,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
return -ENOMEM;
}
- dfb = armada_framebuffer_create(dev, &mode, obj);
+ dfb = armada_framebuffer_create(dev,
+ drm_get_format_info(dev, mode.pixel_format,
+ mode.modifier[0]),
+ &mode, obj);
/*
* A reference is now held by the framebuffer object if
@@ -89,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
- info = drm_fb_helper_alloc_info(fbh);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_fballoc;
- }
-
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -110,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
(unsigned long long)obj->phys_addr);
return 0;
-
- err_fballoc:
- dfb->fb.funcs->destroy(&dfb->fb);
- return ret;
}
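
The call above assumes the three-argument form of drm_get_format_info() used by this series (device, fourcc, modifier). A minimal sketch of resolving the format info from a drm_mode_fb_cmd2, with foo_resolve_format() as a hypothetical helper:

/* Sketch only: look up the format description for the fourcc/modifier
 * pair carried in the mode command. */
static const struct drm_format_info *
foo_resolve_format(struct drm_device *dev, const struct drm_mode_fb_cmd2 *cmd)
{
	return drm_get_format_info(dev, cmd->pixel_format, cmd->modifier[0]);
}
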
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 1a1680d71486..35fcfa0d85ff 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -10,6 +10,7 @@
#include <drm/armada_drm.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "armada_drm.h"
#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 3b9bd8ecda13..21fd3b4ba10f 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index cc47c032dbc1..a0326b4f568e 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -94,12 +95,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane,
return 0;
}
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- else
- crtc_state = crtc->state;
-
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
0,
INT_MAX, true, false);
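
The conversion above relies on the plane being bound to a CRTC in the same atomic state, so the new-state accessor can be used directly. A hedged sketch of the pattern; foo_plane_atomic_check() is illustrative, not the armada code:

/* Sketch only: fetch the CRTC state that belongs to the same atomic
 * commit as the plane state being checked. */
static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   0, INT_MAX, true, false);
}
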
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 397e677a691c..46094cca2974 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -144,11 +144,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(drm->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da0663542e8a..242fbccdf844 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 47da848fa3fc..cdbcba3b43ad 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,15 +4,23 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_2000.o \
+ ast_2100.o \
+ ast_2200.o \
+ ast_2300.o \
+ ast_2400.o \
+ ast_2500.o \
+ ast_2600.o \
+ ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
ast_dp.o \
ast_drv.o \
- ast_main.o \
ast_mm.o \
ast_mode.o \
ast_post.o \
ast_sil164.o \
+ ast_vbios.o \
ast_vga.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
new file mode 100644
index 000000000000..fa3bc23ce098
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2000_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xFFFFFFFF },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089),
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static void ast_post_chip_2000(struct ast_device *ast)
+{
+ u8 j;
+ u32 temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2000_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2000(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2000_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x77, 0x58, 0x80}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ u8 vgacra3;
+
+ /*
+ * VGACRA3 Enhanced Color Mode Register, check if DVO is already
+ * enabled, in that case, assume we have a SIL164 TMDS transmitter
+ *
+ * Don't make that assumption if the chip wasn't enabled and
+ * is at power-on reset, otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
+ tx_chip = AST_TX_SIL164;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static const struct ast_device_quirks ast_2000_device_quirks = {
+ .crtc_mem_req_threshold_low = 31,
+ .crtc_mem_req_threshold_high = 47,
+};
+
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
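
The per-generation constructors added by this series share one signature; how they are selected is outside this excerpt. A purely illustrative dispatcher under that assumption (the real logic lives in ast_drv.c, not shown here):

/* Sketch only: ast_device_create_example() and its switch are assumptions
 * for illustration; only the *_device_create() constructors and the
 * AST2100/AST2200 enumerators appear in this excerpt. */
static struct drm_device *
ast_device_create_example(struct pci_dev *pdev, const struct drm_driver *drv,
			  enum ast_chip chip, enum ast_config_mode config_mode,
			  void __iomem *regs, void __iomem *ioregs,
			  bool need_post)
{
	switch (chip) {
	case AST2100:
		return ast_2100_device_create(pdev, drv, chip, config_mode,
					      regs, ioregs, need_post);
	case AST2200:
		return ast_2200_device_create(pdev, drv, chip, config_mode,
					      regs, ioregs, need_post);
	default:
		return ERR_PTR(-ENODEV);
	}
}
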
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
new file mode 100644
index 000000000000..05aeb0624d41
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * DRAM type
+ */
+
+static enum ast_dram_layout ast_2100_get_dram_layout_p2a(struct ast_device *ast)
+{
+ u32 mcr_cfg;
+ enum ast_dram_layout dram_layout;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ default:
+ dram_layout = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ dram_layout = AST_DRAM_1Gx16;
+ else
+ dram_layout = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ dram_layout = AST_DRAM_1Gx32;
+ break;
+ }
+
+ return dram_layout;
+}
+
+/*
+ * POST
+ */
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static int cbrtest_ast2150(struct ast_device *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = 0xff;
+ dll_min[1] = 0xff;
+ dll_min[2] = 0xff;
+ dll_min[3] = 0xff;
+ dll_max[0] = 0x00;
+ dll_max[1] = 0x00;
+ dll_max[2] = 0x00;
+ dll_max[3] = 0x00;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150) {
+ goto cbr_start;
+ }
+ }
+ if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_post_chip_2100(struct ast_device *ast)
+{
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+ enum ast_dram_layout dram_layout = ast_2100_get_dram_layout_p2a(ast);
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
+ switch (dram_layout) {
+ case AST_DRAM_1Gx16:
+ data = 0x00000d89;
+ break;
+ case AST_DRAM_1Gx32:
+ data = 0x00000c8d;
+ break;
+ default:
+ data = dram_reg_info->data;
+ break;
+ }
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2100_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2100(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Widescreen detection
+ */
+
+/* Try to detect WSXGA+ on Gen2+ */
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
+{
+ u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
+
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
+ return true;
+ if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
+ return true;
+
+ return false;
+}
+
+/* Try to detect WUXGA on Gen2+ */
+bool __ast_2100_detect_wuxga(struct ast_device *ast)
+{
+ u8 vgacrd1;
+
+ if (ast->support_fullhd) {
+ vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
+ return true;
+ }
+
+ return false;
+}
+
+static void ast_2100_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2100)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2100_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2100_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c
new file mode 100644
index 000000000000..b64345d11ffa
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2200.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+
+static void ast_2200_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2200)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2200_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2200_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
+
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
new file mode 100644
index 000000000000..5f50d9f91ffd
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2300_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE0 ((1 << 10) - 1)
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x41);
+}
+
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0xc5);
+}
+
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x05);
+}
+
+static int cbr_test(struct ast_device *ast)
+{
+ u32 data;
+ int i;
+
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_device *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test2(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static bool cbr_test3(struct ast_device *ast)
+{
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single(ast, 0))
+ return false;
+ return true;
+}
+
+static bool cbr_scan3(struct ast_device *ast)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
+ }
+ if (loop == 2)
+ return false;
+ }
+ return true;
+}
+
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli)
+ dllmin[cnt] = dlli;
+ if (dllmax[cnt] < dlli)
+ dllmax[cnt] = dlli;
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
+ if (passcnt != 16)
+ goto FINETUNE_START;
+ status = true;
+FINETUNE_DONE:
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ else
+ dlli = (dlli - 1) & 0x7;
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
+} /* finetuneDQI_L */
+
+static void finetuneDQSI(struct ast_device *ast)
+{
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+
+ for (dlli = 0; dlli < 76; dlli++) {
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068,
+ 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16)dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16)dlli;
+ } else if (passcnt[dqsip] >= 5) {
+ break;
+ } else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
+ }
+ }
+ }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
+ }
+ /* Search margin */
+ g_dqidly = 0;
+ g_dqsip = 0;
+ g_margin = 0;
+ g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff + 2) < g_margin)
+ continue;
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0];
+ dlli > 0 && tag[dqsip][dlli] != 0;
+ dlli--, passcnt[0]++) {
+ }
+ for (dlli = pass[dqidly][dqsip][1];
+ dlli < 76 && tag[dqsip][dlli] != 0;
+ dlli++, passcnt[1]++) {
+ }
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ }
+ }
+ }
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+}
+
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
+
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
+
+CBR_START2:
+ dllmin[0] = 0xff;
+ dllmin[1] = 0xff;
+ dllmax[0] = 0x0;
+ dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli)
+ dllmin[0] = dlli;
+ if (dllmax[0] < dlli)
+ dllmax[0] = dlli;
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli)
+ dllmin[1] = dlli;
+ if (dllmax[1] < dlli)
+ dllmax[1] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (retry++ > 10)
+ goto CBR_DONE2;
+ if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ status = true;
+CBR_DONE2:
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait for MCLK2X to lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
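+ /*
+ * Step the master delay adjustment (MADJ) in units of 4, derive the
+ * matching slave adjustment (SADJ) and retrigger the DLL after each
+ * step, until the MCLK2X phase reading is acceptable or madj_max is
+ * exceeded.
+ */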
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x300;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr3_init_start;
+
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait for MCLK2X to lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x500;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr2_init_start;
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void ast_post_chip_2300(struct ast_device *ast)
+{
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) {/* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_freq = 396;
+ param.dram_type = AST_DDR3;
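+ /*
+ * The hardware strap register at 0x1e6e2070 encodes the DRAM type,
+ * chip density and the VRAM size assigned to the VGA controller.
+ */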
+ temp = ast_mindwm(ast, 0x1e6e2070);
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ switch (temp & 0x18000000) {
+ case 0:
+ param.dram_chipid = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 0x08000000:
+ param.dram_chipid = AST_DRAM_1Gx16;
+ break;
+ case 0x10000000:
+ param.dram_chipid = AST_DRAM_2Gx16;
+ break;
+ case 0x18000000:
+ param.dram_chipid = AST_DRAM_4Gx16;
+ break;
+ }
+ switch (temp & 0x0c) {
+ default:
+ case 0x00:
+ param.vram_size = SZ_8M;
+ break;
+ case 0x04:
+ param.vram_size = SZ_16M;
+ break;
+ case 0x08:
+ param.vram_size = SZ_32M;
+ break;
+ case 0x0c:
+ param.vram_size = SZ_64M;
+ break;
+ }
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2300_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2300(ast);
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Device initialization
+ */
+
+void ast_2300_detect_tx_chip(struct ast_device *ast)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ struct drm_device *dev = &ast->base;
+ u8 vgacrd1;
+
+ /*
+ * On AST GEN4+, look at the configuration set by the SoC in
+ * the SOC scratch register #1 bits 11:8 (interestingly marked
+ * as "reserved" in the spec)
+ */
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ /*
+ * GEN4 to GEN6
+ */
+ case AST_IO_VGACRD1_TX_SIL164_VBIOS:
+ tx_chip = AST_TX_SIL164;
+ break;
+ case AST_IO_VGACRD1_TX_DP501_VBIOS:
+ ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL);
+ if (ast->dp501_fw_addr) {
+ /* backup firmware */
+ if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) {
+ drmm_kfree(dev, ast->dp501_fw_addr);
+ ast->dp501_fw_addr = NULL;
+ }
+ }
+ fallthrough;
+ case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
+ tx_chip = AST_TX_DP501;
+ break;
+ /*
+ * GEN7+
+ */
+ case AST_IO_VGACRD1_TX_ASTDP:
+ tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
+ drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_CH7003_VBIOS:
+ drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
+ drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static void ast_2300_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2300_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2300_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c
new file mode 100644
index 000000000000..2e6befd24f91
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2400.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+static void ast_2400_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2400_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2400_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
new file mode 100644
index 000000000000..2a52af0ded56
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+/*
+ * AST2500 DRAM settings modules
+ */
+
+#define REGTBL_NUM 17
+#define REGIDX_010 0
+#define REGIDX_014 1
+#define REGIDX_018 2
+#define REGIDX_020 3
+#define REGIDX_024 4
+#define REGIDX_02C 5
+#define REGIDX_030 6
+#define REGIDX_214 7
+#define REGIDX_2E0 8
+#define REGIDX_2E4 9
+#define REGIDX_2E8 10
+#define REGIDX_2EC 11
+#define REGIDX_2F0 12
+#define REGIDX_2F4 13
+#define REGIDX_2F8 14
+#define REGIDX_RFC 15
+#define REGIDX_PLL 16
+
+static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
+ 0x64604D38, /* 0x010 */
+ 0x29690599, /* 0x014 */
+ 0x00000300, /* 0x018 */
+ 0x00000000, /* 0x020 */
+ 0x00000000, /* 0x024 */
+ 0x02181E70, /* 0x02C */
+ 0x00000040, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x02001300, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001B, /* 0x2E8 */
+ 0x35B8C105, /* 0x2EC */
+ 0x08090408, /* 0x2F0 */
+ 0x9B000800, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x9971452F, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
+ 0x63604E37, /* 0x010 */
+ 0xE97AFA99, /* 0x014 */
+ 0x00019000, /* 0x018 */
+ 0x08000000, /* 0x020 */
+ 0x00000400, /* 0x024 */
+ 0x00000410, /* 0x02C */
+ 0x00000101, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x03002900, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001C, /* 0x2E8 */
+ 0x35B8C106, /* 0x2EC */
+ 0x08080607, /* 0x2F0 */
+ 0x9B000900, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x99714545, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+#define TIMEOUT 5000000
+
+void ast_2500_patch_ahb(void __iomem *regs)
+{
+ u32 data;
+
+ /* Clear bus lock condition */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
+ /*
+ * If "Fast restet" is enabled for ARM-ICE debugger,
+ * then WDT needs to enable, that
+ * WDT04 is WDT#1 Reload reg.
+ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
+ * WDT0C is WDT#1 control reg
+ * [6:5]:= 01:Full chip
+ * [4]:= 1:1MHz clock source
+ * [1]:= 1:WDT will be cleared and disabled after timeout occurs
+ * [0]:= 1:WDT enable
+ */
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
+ udelay(1000);
+ }
+
+ do {
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
+}
+
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0x85);
+}
+
+static bool cbr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static bool ddr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_burst(ast, 1))
+ return false;
+ if (!mmc_test_burst(ast, 2))
+ return false;
+ if (!mmc_test_burst(ast, 3))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static void ddr_init_common_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
+ ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
+ ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
+ ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
+ ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
+}
+
+static void ddr_phy_init_2500(struct ast_device *ast)
+{
+ u32 data, pass, timecnt;
+
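+ /*
+ * Fire the DDR PHY (DFI) init by setting bit 0 of 0x1E6E0060 and poll
+ * for it to clear; restart the sequence if it times out or the PHY
+ * reports an error in 0x1E6E0300.
+ */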
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ while (!pass) {
+ for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
+ data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
+ if (!data)
+ break;
+ }
+ if (timecnt != TIMEOUT) {
+ data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
+ if (!data)
+ pass = 1;
+ }
+ if (!pass) {
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ udelay(10); /* delay 10 us */
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
+}
+
+/*
+ * Check DRAM Size
+ * 1Gb : 0x80000000 ~ 0x87FFFFFF
+ * 2Gb : 0x80000000 ~ 0x8FFFFFFF
+ * 4Gb : 0x80000000 ~ 0x9FFFFFFF
+ * 8Gb : 0x80000000 ~ 0xBFFFFFFF
+ */
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
+{
+ u32 reg_04, reg_14;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
+ reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
+
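+ /*
+ * Write a distinct test pattern near the top of each candidate density;
+ * the largest address that reads back intact selects the DRAM size bits
+ * and the matching tRFC byte.
+ */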
+ ast_moutdwm(ast, 0xA0100000, 0x41424344);
+ ast_moutdwm(ast, 0x90100000, 0x35363738);
+ ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
+ ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
+
+ /* Check 8Gbit */
+ if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
+ reg_04 |= 0x03;
+ reg_14 |= (tRFC >> 24) & 0xFF;
+ /* Check 4Gbit */
+ } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
+ reg_04 |= 0x02;
+ reg_14 |= (tRFC >> 16) & 0xFF;
+ /* Check 2Gbit */
+ } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
+ reg_04 |= 0x01;
+ reg_14 |= (tRFC >> 8) & 0xFF;
+ } else {
+ reg_14 |= tRFC & 0xFF;
+ }
+ ast_moutdwm(ast, 0x1E6E0004, reg_04);
+ ast_moutdwm(ast, 0x1E6E0014, reg_14);
+}
+
+static void enable_cache_2500(struct ast_device *ast)
+{
+ u32 reg_04, data;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004);
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
+
+ do {
+ data = ast_mindwm(ast, 0x1E6E0004);
+ } while (!(data & 0x80000));
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
+}
+
+static void set_mpll_2500(struct ast_device *ast)
+{
+ u32 addr, data, param;
+
+ /* Reset MMC */
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
+ ast_moutdwm(ast, addr, 0x0);
+ addr += 4;
+ }
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
+ if (data) {
+ /* CLKIN = 25MHz */
+ param = 0x930023E0;
+ ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
+ } else {
+ /* CLKIN = 24MHz */
+ param = 0x93002400;
+ }
+ ast_moutdwm(ast, 0x1E6E2020, param);
+ udelay(100);
+}
+
+static void reset_mmc_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E78505C, 0x00000004);
+ ast_moutdwm(ast, 0x1E785044, 0x00000001);
+ ast_moutdwm(ast, 0x1E785048, 0x00004755);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000013);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E785054, 0x00000077);
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+}
+
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ u32 data, data2, pass, retrycnt;
+ u32 ddr_vref, phy_vref;
+ u32 min_ddr_vref = 0, min_phy_vref = 0;
+ u32 max_ddr_vref = 0, max_phy_vref = 0;
+
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
+ ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
+
+ /* Train PHY Vref first */
+ pass = 0;
+
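+ /*
+ * Sweep the PHY-side Vref, run the calibration test at each step and
+ * keep the setting with the widest read margin reported in 0x1E6E03D0.
+ */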
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ max_phy_vref = 0x0;
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
+ for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ data = ast_mindwm(ast, 0x1E6E03D0);
+ data2 = data >> 8;
+ data = data & 0xff;
+ if (data > data2)
+ data = data2;
+ if (max_phy_vref < data) {
+ max_phy_vref = data;
+ min_phy_vref = phy_vref;
+ }
+ } else if (pass > 0) {
+ break;
+ }
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
+
+ /* Train DDR Vref next */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ min_ddr_vref = 0xFF;
+ max_ddr_vref = 0x0;
+ pass = 0;
+ for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ if (min_ddr_vref > ddr_vref)
+ min_ddr_vref = ddr_vref;
+ if (max_ddr_vref < ddr_vref)
+ max_ddr_vref = ddr_vref;
+ } else if (pass != 0) {
+ break;
+ }
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static bool ast_dram_init_2500(struct ast_device *ast)
+{
+ u32 data;
+ u32 max_tries = 5;
+
+ do {
+ if (max_tries-- == 0)
+ return false;
+ set_mpll_2500(ast);
+ reset_mmc_2500(ast);
+ ddr_init_common_2500(ast);
+
+ data = ast_mindwm(ast, 0x1E6E2070);
+ if (data & 0x01000000)
+ ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
+ else
+ ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
+ } while (!ddr_test_2500(ast));
+
+ ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
+
+ /* Patch code */
+ data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
+ ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
+
+ return true;
+}
+
+static void ast_post_chip_2500(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
+ /* Clear bus lock condition */
+ ast_2500_patch_ahb(ast->regs);
+
+ /* Disable watchdog */
+ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
+
+ /*
+ * Reset USB port to patch USB unknown device issue
+ * SCU90 is Multi-function Pin Control #5
+ * [29]:= 1:Enable USB2.0 Host port#1 (the port that is shared with
+ * the USB2.0 Hub).
+ * SCU94 is Multi-function Pin Control #6
+ * [14:13]:= 1x:USB2.0 Host2 controller
+ * SCU70 is Hardware Strap reg
+ * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
+ * [18]: 0(24)/1(48) MHz)
+ * SCU7C is Write clear reg to SCU70
+ * [23]:= write 1 and then SCU70[23] will be cleared to 0b.
+ */
+ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
+ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
+ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
+ }
+ /* Modify eSPI reset pin */
+ temp = ast_mindwm(ast, 0x1E6E2070);
+ if (temp & 0x02000000)
+ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ if (!ast_dram_init_2500(ast))
+ drm_err(dev, "DRAM init failed!\n");
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2500_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2500(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2500_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x58, 0x01, 0x42}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+static void ast_2500_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2500_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+ .crtc_hsync_precatch_needed = true,
+};
+
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2500_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
new file mode 100644
index 000000000000..dee78fd5b022
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+int ast_2600_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->tx_chip == AST_TX_ASTDP)
+ return ast_dp_launch(ast);
+
+ return 0;
+}
+
+/*
+ * Device initialization
+ */
+
+static void ast_2600_detect_widescreen(struct ast_device *ast)
+{
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2600_device_quirks = {
+ .crtc_mem_req_threshold_low = 160,
+ .crtc_mem_req_threshold_high = 224,
+ .crtc_hsync_precatch_needed = true,
+ .crtc_hsync_add4_needed = true,
+};
+
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ switch (ast->tx_chip) {
+ case AST_TX_ASTDP:
+ ret = ast_post_gpu(ast);
+ break;
+ default:
+ ret = 0;
+ if (need_post)
+ ret = ast_post_gpu(ast);
+ break;
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2600_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
new file mode 100644
index 000000000000..2d3ad7610c2e
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+/*
+ * Hardware cursor
+ */
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
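+ /*
+ * The cursor's VRAM area holds the pixel data followed by a small
+ * signature block (checksum, size, position and hotspot).
+ */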
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
+
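+ /*
+ * Sum the ARGB4444 image as 32-bit words, with a trailing 16-bit word
+ * for odd widths, skipping the unused tail of each full-width line.
+ */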
+static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
+{
+ u32 csum = 0;
+ unsigned int one_pixel_copy = width & BIT(0);
+ unsigned int two_pixel_copy = width - one_pixel_copy;
+ unsigned int trailing_bytes = (AST_MAX_HWC_WIDTH - width) * sizeof(u16);
+ unsigned int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < two_pixel_copy; x += 2) {
+ const u32 *src32 = (const u32 *)src;
+
+ csum += *src32;
+ src += SZ_4;
+ }
+ if (one_pixel_copy) {
+ const u16 *src16 = (const u16 *)src;
+
+ csum += *src16;
+ src += SZ_2;
+ }
+ src += trailing_bytes;
+ }
+
+ return csum;
+}
+
+static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
+ unsigned int width, unsigned int height)
+{
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
+ u32 csum;
+
+ csum = ast_cursor_calculate_checksum(src, width, height);
+
+ /* write pixel data */
+ memcpy_toio(dst, src, AST_HWC_SIZE);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+}
+
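+ /* VGACRC8..CA take the cursor pattern's VRAM offset in units of 8 bytes. */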
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
+}
+
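+ /*
+ * The X position is 12 bits and Y is 11 bits wide, each split over two
+ * registers; the offset registers select the first visible column and
+ * line within the cursor pattern.
+ */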
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
+ u8 x_offset, u8 y_offset)
+{
+ u8 x0 = (x & 0x00ff);
+ u8 x1 = (x & 0x0f00) >> 8;
+ u8 y0 = (y & 0x00ff);
+ u8 y1 = (y & 0x0700) >> 8;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
+}
+
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
+{
+ static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
+ AST_IO_VGACRCB_HWC_ENABLED);
+
+ u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
+
+ if (enabled)
+ vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
+
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
+}
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+};
+
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_plane_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret || !new_plane_state->visible)
+ return ret;
+
+ if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct drm_rect damage;
+ u64 dst_off = ast_plane->offset;
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
+ u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
+ unsigned int offset_x, offset_y;
+ u16 x, y;
+ u8 x_offset, y_offset;
+
+ /*
+ * Do data transfer to hardware buffer and point the scanout
+ * engine to the offset.
+ */
+
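+ /*
+ * ARGB4444 framebuffers are uploaded as-is; other formats (i.e. ARGB8888)
+ * are first converted into the plane's ARGB4444 staging buffer.
+ */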
+ if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
+ u8 *argb4444;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB4444:
+ argb4444 = shadow_plane_state->data[0].vaddr;
+ break;
+ default:
+ argb4444 = ast_cursor_plane->argb4444;
+ {
+ struct iosys_map argb4444_dst[DRM_FORMAT_MAX_PLANES] = {
+ IOSYS_MAP_INIT_VADDR(argb4444),
+ };
+ unsigned int argb4444_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ AST_HWC_PITCH,
+ };
+
+ drm_fb_argb8888_to_argb4444(argb4444_dst, argb4444_dst_pitch,
+ shadow_plane_state->data, fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+ }
+ break;
+ }
+ ast_set_cursor_image(ast, argb4444, fb->width, fb->height);
+ ast_set_cursor_base(ast, dst_off);
+ }
+
+ /*
+ * Update location in HWC signature and registers.
+ */
+
+ writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
+ writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
+
+ offset_x = AST_MAX_HWC_WIDTH - fb->width;
+ offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+
+ if (plane_state->crtc_x < 0) {
+ x_offset = (-plane_state->crtc_x) + offset_x;
+ x = 0;
+ } else {
+ x_offset = offset_x;
+ x = plane_state->crtc_x;
+ }
+ if (plane_state->crtc_y < 0) {
+ y_offset = (-plane_state->crtc_y) + offset_y;
+ y = 0;
+ } else {
+ y_offset = offset_y;
+ y = plane_state->crtc_y;
+ }
+
+ ast_set_cursor_location(ast, x, y, x_offset, y_offset);
+
+ /* Dummy write to enable the HWC and make the hardware pick up the changes. */
+ ast_set_cursor_enabled(ast, true);
+}
+
+static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+
+ ast_set_cursor_enabled(ast, false);
+}
+
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ast_cursor_plane_helper_atomic_check,
+ .atomic_update = ast_cursor_plane_helper_atomic_update,
+ .atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+int ast_cursor_plane_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
+ struct ast_plane *ast_plane = &ast_cursor_plane->base;
+ struct drm_plane *cursor_plane = &ast_plane->base;
+ unsigned long size;
+ long offset;
+ int ret;
+
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
+
+ ret = ast_plane_init(dev, ast_plane, offset, size,
+ 0x01, &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR);
+ if (ret) {
+ drm_err(dev, "ast_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(cursor_plane);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 0e282b7b167c..8e650a02c528 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -5,6 +5,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -12,11 +13,71 @@
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
+#include "ast_vbios.h"
+
+struct ast_astdp_mode_index_table_entry {
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ unsigned int mode_index;
+};
+
+/* FIXME: Do refresh rate and flags actually matter? */
+static const struct ast_astdp_mode_index_table_entry ast_astdp_mode_index_table[] = {
+ { 320, 240, ASTDP_320x240_60 },
+ { 400, 300, ASTDP_400x300_60 },
+ { 512, 384, ASTDP_512x384_60 },
+ { 640, 480, ASTDP_640x480_60 },
+ { 800, 600, ASTDP_800x600_56 },
+ { 1024, 768, ASTDP_1024x768_60 },
+ { 1152, 864, ASTDP_1152x864_75 },
+ { 1280, 800, ASTDP_1280x800_60_RB },
+ { 1280, 1024, ASTDP_1280x1024_60 },
+ { 1360, 768, ASTDP_1366x768_60 }, // same as 1366x768
+ { 1366, 768, ASTDP_1366x768_60 },
+ { 1440, 900, ASTDP_1440x900_60_RB },
+ { 1600, 900, ASTDP_1600x900_60_RB },
+ { 1600, 1200, ASTDP_1600x1200_60 },
+ { 1680, 1050, ASTDP_1680x1050_60_RB },
+ { 1920, 1080, ASTDP_1920x1080_60 },
+ { 1920, 1200, ASTDP_1920x1200_60 },
+ { 0 }
+};
+
+struct ast_astdp_connector_state {
+ struct drm_connector_state base;
+
+ int mode_index;
+};
+
+static struct ast_astdp_connector_state *
+to_ast_astdp_connector_state(const struct drm_connector_state *state)
+{
+ return container_of(state, struct ast_astdp_connector_state, base);
+}
+
+static int ast_astdp_get_mode_index(unsigned int hdisplay, unsigned int vdisplay)
+{
+ const struct ast_astdp_mode_index_table_entry *entry = ast_astdp_mode_index_table;
+
+ while (entry->hdisplay && entry->vdisplay) {
+ if (entry->hdisplay == hdisplay && entry->vdisplay == vdisplay)
+ return entry->mode_index;
+ ++entry;
+ }
+
+ return -EINVAL;
+}
static bool ast_astdp_is_connected(struct ast_device *ast)
{
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
+ /*
+ * HPD might be set even if no monitor is connected, so also check that
+ * the link training was successful.
+ */
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS))
+ return false;
return true;
}
@@ -73,7 +134,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si
* 3. The Delays are often longer a lot when system resume from S3/S4.
*/
if (j)
- mdelay(j + 1);
+ msleep(j + 1);
/* Wait for EDID offset to show up in mirror register */
vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
@@ -195,7 +256,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
- for (i = 0; i < 200; ++i) {
+ for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
@@ -221,80 +282,6 @@ static void ast_dp_set_enable(struct ast_device *ast, bool enabled)
drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled));
}
-static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
-{
- struct ast_device *ast = to_ast_device(crtc->dev);
-
- u32 ulRefreshRateIndex;
- u8 ModeIdx;
-
- ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1;
-
- switch (crtc->mode.crtc_hdisplay) {
- case 320:
- ModeIdx = ASTDP_320x240_60;
- break;
- case 400:
- ModeIdx = ASTDP_400x300_60;
- break;
- case 512:
- ModeIdx = ASTDP_512x384_60;
- break;
- case 640:
- ModeIdx = (ASTDP_640x480_60 + (u8) ulRefreshRateIndex);
- break;
- case 800:
- ModeIdx = (ASTDP_800x600_56 + (u8) ulRefreshRateIndex);
- break;
- case 1024:
- ModeIdx = (ASTDP_1024x768_60 + (u8) ulRefreshRateIndex);
- break;
- case 1152:
- ModeIdx = ASTDP_1152x864_75;
- break;
- case 1280:
- if (crtc->mode.crtc_vdisplay == 800)
- ModeIdx = (ASTDP_1280x800_60_RB - (u8) ulRefreshRateIndex);
- else // 1024
- ModeIdx = (ASTDP_1280x1024_60 + (u8) ulRefreshRateIndex);
- break;
- case 1360:
- case 1366:
- ModeIdx = ASTDP_1366x768_60;
- break;
- case 1440:
- ModeIdx = (ASTDP_1440x900_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1600:
- if (crtc->mode.crtc_vdisplay == 900)
- ModeIdx = (ASTDP_1600x900_60_RB - (u8) ulRefreshRateIndex);
- else //1200
- ModeIdx = ASTDP_1600x1200_60;
- break;
- case 1680:
- ModeIdx = (ASTDP_1680x1050_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1920:
- if (crtc->mode.crtc_vdisplay == 1080)
- ModeIdx = ASTDP_1920x1080_60;
- else //1200
- ModeIdx = ASTDP_1920x1200_60;
- break;
- default:
- return;
- }
-
- /*
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE0, ASTDP_AND_CLEAR_MASK,
- ASTDP_MISC0_24bpp);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
-}
-
static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
@@ -313,15 +300,62 @@ static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static enum drm_mode_status
+ast_astdp_encoder_helper_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ int res;
+
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return MODE_NOMODE;
+
+ return MODE_OK;
+}
+
static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int mode_index = astdp_conn_state->mode_index;
+ u8 refresh_rate_index;
+ u8 vgacre0, vgacre1, vgacre2;
+
+ if (drm_WARN_ON(dev, vmode->refresh_rate_index < 1 || vmode->refresh_rate_index > 255))
+ return;
+ refresh_rate_index = vmode->refresh_rate_index - 1;
+
+ /* FIXME: Why are we doing this? */
+ switch (mode_index) {
+ case ASTDP_1280x800_60_RB:
+ case ASTDP_1440x900_60_RB:
+ case ASTDP_1600x900_60_RB:
+ case ASTDP_1680x1050_60_RB:
+ mode_index = (u8)(mode_index - (u8)refresh_rate_index);
+ break;
+ default:
+ mode_index = (u8)(mode_index + (u8)refresh_rate_index);
+ break;
+ }
- ast_dp_set_mode(crtc, vbios_mode_info);
+ /*
+ * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp))
+ * CRE1[7:0]: MISC1 (default: 0x00)
+ * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
+ */
+ vgacre0 = AST_IO_VGACRE0_24BPP;
+ vgacre1 = 0x00;
+ vgacre2 = mode_index & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe0, vgacre0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe1, vgacre1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe2, vgacre2);
}
static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
@@ -348,10 +382,31 @@ static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
ast_dp_set_phy_sleep(ast, true);
}
+static int ast_astdp_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int res;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return res;
+ astdp_conn_state->mode_index = res;
+ }
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
+ .mode_valid = ast_astdp_encoder_helper_mode_valid,
.atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
.atomic_enable = ast_astdp_encoder_helper_atomic_enable,
.atomic_disable = ast_astdp_encoder_helper_atomic_disable,
+ .atomic_check = ast_astdp_encoder_helper_atomic_check,
};
/*
@@ -422,18 +477,62 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs
.detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
-/*
- * Output
- */
+static void ast_astdp_connector_reset(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *astdp_state =
+ kzalloc(sizeof(*astdp_state), GFP_KERNEL);
+
+ if (connector->state)
+ connector->funcs->atomic_destroy_state(connector, connector->state);
+
+ if (astdp_state)
+ __drm_atomic_helper_connector_reset(connector, &astdp_state->base);
+ else
+ __drm_atomic_helper_connector_reset(connector, NULL);
+}
+
+static struct drm_connector_state *
+ast_astdp_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *new_astdp_state, *astdp_state;
+ struct drm_device *dev = connector->dev;
+
+ if (drm_WARN_ON(dev, !connector->state))
+ return NULL;
+
+ new_astdp_state = kmalloc(sizeof(*new_astdp_state), GFP_KERNEL);
+ if (!new_astdp_state)
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_astdp_state->base);
+
+ astdp_state = to_ast_astdp_connector_state(connector->state);
+
+ new_astdp_state->mode_index = astdp_state->mode_index;
+
+ return &new_astdp_state->base;
+}
+
+static void ast_astdp_connector_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct ast_astdp_connector_state *astdp_state = to_ast_astdp_connector_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(&astdp_state->base);
+ kfree(astdp_state);
+}
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
+ .reset = ast_astdp_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = ast_astdp_connector_atomic_duplicate_state,
+ .atomic_destroy_state = ast_astdp_connector_atomic_destroy_state,
};
+/*
+ * Output
+ */
+
int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
deleted file mode 100644
index 1e9ac9d6d26c..000000000000
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AST_DRAM_TABLES_H
-#define AST_DRAM_TABLES_H
-
-/* DRAM timing tables */
-struct ast_dramstruct {
- u16 index;
- u32 data;
-};
-
-static const struct ast_dramstruct ast2000_dram_table_data[] = {
- { 0x0108, 0x00000000 },
- { 0x0120, 0x00004a21 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xFFFFFFFF },
- { 0x0004, 0x00000089 },
- { 0x0008, 0x22331353 },
- { 0x000C, 0x0d07000b },
- { 0x0010, 0x11113333 },
- { 0x0020, 0x00110350 },
- { 0x0028, 0x1e0828f0 },
- { 0x0024, 0x00000001 },
- { 0x001C, 0x00000000 },
- { 0x0014, 0x00000003 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000131 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000031 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0028, 0x1e0828f1 },
- { 0x0024, 0x00000003 },
- { 0x002C, 0x1f0f28fb },
- { 0x0030, 0xFFFFFE01 },
- { 0xFFFF, 0xFFFFFFFF }
-};
-
-static const struct ast_dramstruct ast1100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x000041f0 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00050000 },
- { 0x0004, 0x00000585 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x22201724 },
- { 0x0018, 0x1e29011a },
- { 0x0020, 0x00c82222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x032aa02a },
- { 0x0064, 0x002d3000 },
- { 0x0068, 0x00000000 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000732 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000632 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00004c41 },
- { 0xffff, 0xffffffff },
-};
-
-static const struct ast_dramstruct ast2100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x00004120 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00070000 },
- { 0x0004, 0x00000489 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x32302926 },
- { 0x0018, 0x274c0122 },
- { 0x0020, 0x00ce2222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x0f2aa02a },
- { 0x0064, 0x003f3005 },
- { 0x0068, 0x02020202 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000942 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000842 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00005061 },
- { 0xffff, 0xffffffff },
-};
-
-/*
- * AST2500 DRAM settings modules
- */
-#define REGTBL_NUM 17
-#define REGIDX_010 0
-#define REGIDX_014 1
-#define REGIDX_018 2
-#define REGIDX_020 3
-#define REGIDX_024 4
-#define REGIDX_02C 5
-#define REGIDX_030 6
-#define REGIDX_214 7
-#define REGIDX_2E0 8
-#define REGIDX_2E4 9
-#define REGIDX_2E8 10
-#define REGIDX_2EC 11
-#define REGIDX_2F0 12
-#define REGIDX_2F4 13
-#define REGIDX_2F8 14
-#define REGIDX_RFC 15
-#define REGIDX_PLL 16
-
-static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
- 0x64604D38, /* 0x010 */
- 0x29690599, /* 0x014 */
- 0x00000300, /* 0x018 */
- 0x00000000, /* 0x020 */
- 0x00000000, /* 0x024 */
- 0x02181E70, /* 0x02C */
- 0x00000040, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x02001300, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001B, /* 0x2E8 */
- 0x35B8C105, /* 0x2EC */
- 0x08090408, /* 0x2F0 */
- 0x9B000800, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x9971452F, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
- 0x63604E37, /* 0x010 */
- 0xE97AFA99, /* 0x014 */
- 0x00019000, /* 0x018 */
- 0x08000000, /* 0x020 */
- 0x00000400, /* 0x024 */
- 0x00000410, /* 0x02C */
- 0x00000101, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x03002900, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001C, /* 0x2E8 */
- 0x35B8C106, /* 0x2EC */
- 0x08080607, /* 0x2F0 */
- 0x9B000900, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x99714545, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-#endif
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index ff3bcdd1cff2..b9a9b050b546 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -37,6 +37,7 @@
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -46,6 +47,34 @@ static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks)
+{
+ ast->quirks = quirks;
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
+}
+
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip)
+{
+ static const char * const info_str[] = {
+ "analog VGA",
+ "Sil164 TMDS transmitter",
+ "DP501 DisplayPort transmitter",
+ "ASPEED DisplayPort transmitter",
+ };
+
+ drm_info(&ast->base, "Using %s\n", info_str[tx_chip]);
+
+ ast->tx_chip = tx_chip;
+}
+
/*
* DRM driver
*/
@@ -170,8 +199,8 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
- if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(regs);
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
+ ast_2500_patch_ahb(regs);
}
/* Double check that it's actually working */
@@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
*chip_out = chip;
*config_mode_out = config_mode;
- return 0;
+ return __AST_CHIP_GEN(chip);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -277,6 +306,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioregs;
enum ast_config_mode config_mode;
enum ast_chip chip;
+ unsigned int chip_gen;
struct drm_device *drm;
bool need_post = false;
@@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
- if (ret)
+ if (ret < 0)
return ret;
+ chip_gen = ret;
- drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ switch (chip_gen) {
+ case 1:
+ drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 2:
+ drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 3:
+ drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 4:
+ drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 5:
+ drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 6:
+ drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 7:
+ drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ default:
+ dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen);
+ return -ENODEV;
+ }
if (IS_ERR(drm))
return PTR_ERR(drm);
pci_set_drvdata(pdev, drm);
@@ -393,11 +456,15 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
+ int ret;
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
- ast_post_gpu(ast);
+
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ret;
return drm_mode_config_helper_resume(dev);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 6b4305ac07d4..787e38c6c17d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -39,6 +39,8 @@
#include "ast_reg.h"
+struct ast_vbios_enhtable;
+
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@@ -96,13 +98,15 @@ enum ast_config_mode {
ast_use_defaults
};
-#define AST_DRAM_512Mx16 0
-#define AST_DRAM_1Gx16 1
-#define AST_DRAM_512Mx32 2
-#define AST_DRAM_1Gx32 3
-#define AST_DRAM_2Gx16 6
-#define AST_DRAM_4Gx16 7
-#define AST_DRAM_8Gx16 8
+enum ast_dram_layout {
+ AST_DRAM_512Mx16 = 0,
+ AST_DRAM_1Gx16 = 1,
+ AST_DRAM_512Mx32 = 2,
+ AST_DRAM_1Gx32 = 3,
+ AST_DRAM_2Gx16 = 6,
+ AST_DRAM_4Gx16 = 7,
+ AST_DRAM_8Gx16 = 8,
+};
/*
* Hardware cursor
@@ -110,18 +114,8 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
-#define AST_HWC_SIGNATURE_SIZE 32
-
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
+#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
/*
* Planes
@@ -130,7 +124,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -140,6 +133,17 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
return container_of(plane, struct ast_plane, base);
}
+struct ast_cursor_plane {
+ struct ast_plane base;
+
+ u8 argb4444[AST_HWC_SIZE];
+};
+
+static inline struct ast_cursor_plane *to_ast_cursor_plane(struct drm_plane *plane)
+{
+ return container_of(to_ast_plane(plane), struct ast_cursor_plane, base);
+}
+
/*
* Connector
*/
@@ -160,9 +164,31 @@ to_ast_connector(struct drm_connector *connector)
* Device
*/
+struct ast_device_quirks {
+ /*
+ * CRTC memory request threshold
+ */
+ unsigned char crtc_mem_req_threshold_low;
+ unsigned char crtc_mem_req_threshold_high;
+
+ /*
+ * Adjust hsync values to load next scanline early. Signalled
+ * by AST2500PreCatchCRT in VBIOS mode flags.
+ */
+ bool crtc_hsync_precatch_needed;
+
+ /*
+ * Workaround for modes with HSync Time that is not a multiple
+ * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels).
+ */
+ bool crtc_hsync_add4_needed;
+};
+
struct ast_device {
struct drm_device base;
+ const struct ast_device_quirks *quirks;
+
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
@@ -170,21 +196,18 @@ struct ast_device {
enum ast_config_mode config_mode;
enum ast_chip chip;
- uint32_t dram_bus_width;
- uint32_t dram_type;
- uint32_t mclk;
+ const struct ast_vbios_dclk_info *dclk_table;
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
enum ast_tx_chip tx_chip;
struct ast_plane primary_plane;
- struct ast_plane cursor_plane;
+ struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
union {
struct {
@@ -205,7 +228,9 @@ struct ast_device {
} astdp;
} output;
- bool support_wide_screen;
+ bool support_wsxga_p; /* 1680x1050 */
+ bool support_fullhd; /* 1920x1080 */
+ bool support_wuxga; /* 1920x1200 */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
@@ -216,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post);
-
static inline unsigned long __ast_gen(struct ast_device *ast)
{
return __AST_CHIP_GEN(ast->chip);
@@ -281,13 +298,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}
-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
- u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
- tmp |= val;
- __ast_write8_i(addr, reg, index, tmp);
+ val &= ~preserve_mask;
+ __ast_write8_i(addr, reg, index, tmp | val);
}
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
@@ -332,14 +349,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -348,46 +357,24 @@ struct ast_vbios_stdtable {
u8 gr[9];
};
-struct ast_vbios_enhtable {
- u32 ht;
- u32 hde;
- u32 hfp;
- u32 hsync;
- u32 vt;
- u32 vde;
- u32 vfp;
- u32 vsync;
- u32 dclk_index;
- u32 flags;
- u32 refresh_rate;
- u32 refresh_rate_index;
- u32 mode_id;
-};
-
struct ast_vbios_dclk_info {
u8 param1;
u8 param2;
u8 param3;
};
-struct ast_vbios_mode_info {
- const struct ast_vbios_stdtable *std_table;
- const struct ast_vbios_enhtable *enh_table;
-};
-
struct ast_crtc_state {
struct drm_crtc_state base;
/* Last known format of primary plane */
const struct drm_format_info *format;
- struct ast_vbios_mode_info vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table;
+ const struct ast_vbios_enhtable *vmode;
};
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_device *ast);
-
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -444,15 +431,102 @@ int ast_mode_config_init(struct ast_device *ast);
int ast_mm_init(struct ast_device *ast);
+/* ast_drv.c */
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks);
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip);
+
+/* ast_2000.c */
+int ast_2000_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2000_dclk_table[];
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post);
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2100.c */
+int ast_2100_post(struct ast_device *ast);
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast);
+bool __ast_2100_detect_wuxga(struct ast_device *ast);
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2200.c */
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2300.c */
+int ast_2300_post(struct ast_device *ast);
+void ast_2300_detect_tx_chip(struct ast_device *ast);
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2400.c */
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2500.c */
+void ast_2500_patch_ahb(void __iomem *regs);
+int ast_2500_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2500_dclk_table[];
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2600.c */
+int ast_2600_post(struct ast_device *ast);
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
/* ast post */
-void ast_post_gpu(struct ast_device *ast);
+int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
+/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
+int ast_cursor_plane_init(struct ast_device *ast);
+
/* ast dp501 */
bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size);
void ast_init_3rdtx(struct ast_device *ast);
@@ -462,4 +536,15 @@ int ast_dp501_output_init(struct ast_device *ast);
int ast_dp_launch(struct ast_device *ast);
int ast_astdp_output_init(struct ast_device *ast);
+/* ast_mode.c */
+int ast_mode_config_init(struct ast_device *ast);
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
+
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
deleted file mode 100644
index bc37c65305d4..000000000000
--- a/drivers/gpu/drm/ast/ast_main.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <linux/of.h>
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_managed.h>
-
-#include "ast_drv.h"
-
-static void ast_detect_widescreen(struct ast_device *ast)
-{
- u8 jreg;
-
- /* Check if we support wide screen */
- switch (AST_GEN(ast)) {
- case 1:
- ast->support_wide_screen = false;
- break;
- default:
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if (!(jreg & 0x80))
- ast->support_wide_screen = true;
- else if (jreg & 0x01)
- ast->support_wide_screen = true;
- else {
- ast->support_wide_screen = false;
- if (ast->chip == AST1300)
- ast->support_wide_screen = true;
- if (ast->chip == AST1400)
- ast->support_wide_screen = true;
- if (ast->chip == AST2510)
- ast->support_wide_screen = true;
- if (IS_AST_GEN7(ast))
- ast->support_wide_screen = true;
- }
- break;
- }
-}
-
-static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
-{
- static const char * const info_str[] = {
- "analog VGA",
- "Sil164 TMDS transmitter",
- "DP501 DisplayPort transmitter",
- "ASPEED DisplayPort transmitter",
- };
-
- struct drm_device *dev = &ast->base;
- u8 jreg, vgacrd1;
-
- /*
- * Several of the listed TX chips are not explicitly supported
- * by the ast driver. If these exist in real-world devices, they
- * are most likely reported as VGA or SIL164 outputs. We warn here
- * to get bug reports for these devices. If none come in for some
- * time, we can begin to fail device probing on these values.
- */
- vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK);
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS,
- "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS,
- "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS,
- "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
-
- /* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip = AST_TX_NONE;
-
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
- * Don't make that assumption if we the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (jreg & 0x80)
- ast->tx_chip = AST_TX_SIL164;
- }
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
- /*
- * On AST GEN4+, look the configuration set by the SoC in
- * the SOC scratch register #1 bits 11:8 (interestingly marked
- * as "reserved" in the spec)
- */
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
- AST_IO_VGACRD1_TX_TYPE_MASK);
- switch (jreg) {
- case AST_IO_VGACRD1_TX_SIL164_VBIOS:
- ast->tx_chip = AST_TX_SIL164;
- break;
- case AST_IO_VGACRD1_TX_DP501_VBIOS:
- ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
- if (ast->dp501_fw_addr) {
- /* backup firmware */
- if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) {
- drmm_kfree(dev, ast->dp501_fw_addr);
- ast->dp501_fw_addr = NULL;
- }
- }
- fallthrough;
- case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
- ast->tx_chip = AST_TX_DP501;
- }
- } else if (IS_AST_GEN7(ast)) {
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) ==
- AST_IO_VGACRD1_TX_ASTDP) {
- int ret = ast_dp_launch(ast);
-
- if (!ret)
- ast->tx_chip = AST_TX_ASTDP;
- }
- }
-
- drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
-}
-
-static int ast_get_dram_info(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct device_node *np = dev->dev->of_node;
- uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
- uint32_t denum, num, div, ref_pll, dsel;
-
- switch (ast->config_mode) {
- case ast_use_dt:
- /*
- * If some properties are missing, use reasonable
- * defaults for GEN5
- */
- if (of_property_read_u32(np, "aspeed,mcr-configuration",
- &mcr_cfg))
- mcr_cfg = 0x00000577;
- if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
- &mcr_scu_mpll))
- mcr_scu_mpll = 0x000050C0;
- if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
- &mcr_scu_strap))
- mcr_scu_strap = 0;
- break;
- case ast_use_p2a:
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- mcr_cfg = ast_read32(ast, 0x10004);
- mcr_scu_mpll = ast_read32(ast, 0x10120);
- mcr_scu_strap = ast_read32(ast, 0x10170);
- break;
- case ast_use_defaults:
- default:
- ast->dram_bus_width = 16;
- ast->dram_type = AST_DRAM_1Gx16;
- if (IS_AST_GEN6(ast))
- ast->mclk = 800;
- else
- ast->mclk = 396;
- return 0;
- }
-
- if (mcr_cfg & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
-
- if (IS_AST_GEN6(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_8Gx16;
- break;
- }
- } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (mcr_cfg & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (mcr_cfg & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
-
- if (mcr_scu_strap & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = mcr_scu_mpll & 0x1f;
- num = (mcr_scu_mpll & 0x3fe0) >> 5;
- dsel = (mcr_scu_mpll & 0xc000) >> 14;
- switch (dsel) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
- return 0;
-}
-
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post)
-{
- struct drm_device *dev;
- struct ast_device *ast;
- int ret;
-
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
- if (IS_ERR(ast))
- return ERR_CAST(ast);
- dev = &ast->base;
-
- ast->chip = chip;
- ast->config_mode = config_mode;
- ast->regs = regs;
- ast->ioregs = ioregs;
-
- ast_detect_widescreen(ast);
- ast_detect_tx_chip(ast, need_post);
-
- ret = ast_get_dram_info(ast);
- if (ret)
- return ERR_PTR(ret);
-
- drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
- ast->mclk, ast->dram_type, ast->dram_bus_width);
-
- if (need_post)
- ast_post_gpu(ast);
-
- ret = ast_mm_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- /* map reserved buffer */
- ast->dp501_fw_buf = NULL;
- if (ast->vram_size < pci_resource_len(pdev, 0)) {
- ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
- if (!ast->dp501_fw_buf)
- drm_info(dev, "failed to map reserved buffer!\n");
- }
-
- ret = ast_mode_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- return dev;
-}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9d5321c81e68..cd08990a10f9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -29,11 +29,11 @@
*/
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
@@ -43,38 +43,73 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
+#include "ast_vbios.h"
#define AST_LUT_SIZE 256
-static inline void ast_load_palette_index(struct ast_device *ast,
- u8 index, u8 red, u8 green,
- u8 blue)
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // starts at offset
+ long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
+
+static void ast_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- ast_io_write8(ast, AST_IO_VGADWR, index);
+ struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ ast_io_write8(ast, AST_IO_VGADWR, i8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, red);
+ ast_io_write8(ast, AST_IO_VGAPDR, r8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, green);
+ ast_io_write8(ast, AST_IO_VGAPDR, g8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, blue);
+ ast_io_write8(ast, AST_IO_VGAPDR, b8);
ast_io_read8(ast, AST_IO_VGASRI);
}
-static void ast_crtc_set_gamma_linear(struct ast_device *ast,
- const struct drm_format_info *format)
+static void ast_crtc_fill_gamma(struct ast_device *ast,
+ const struct drm_format_info *format)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_fill_palette_8(crtc, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -83,21 +118,22 @@ static void ast_crtc_set_gamma_linear(struct ast_device *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_device *ast,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ast_crtc_load_gamma(struct ast_device *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_load_palette_8(crtc, lut, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i,
- lut[i].red >> 8,
- lut[i].green >> 8,
- lut[i].blue >> 8);
+ drm_crtc_load_gamma_888(crtc, lut, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -106,134 +142,9 @@ static void ast_crtc_set_gamma(struct ast_device *ast,
}
}
-static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- struct ast_vbios_mode_info *vbios_mode)
-{
- u32 refresh_rate_index = 0, refresh_rate;
- const struct ast_vbios_enhtable *best = NULL;
- u32 hborder, vborder;
- bool check_sync;
-
- switch (format->cpp[0] * 8) {
- case 8:
- vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
- break;
- case 16:
- vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
- break;
- case 24:
- case 32:
- vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
- break;
- default:
- return false;
- }
-
- switch (mode->crtc_hdisplay) {
- case 640:
- vbios_mode->enh_table = &res_640x480[refresh_rate_index];
- break;
- case 800:
- vbios_mode->enh_table = &res_800x600[refresh_rate_index];
- break;
- case 1024:
- vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
- break;
- case 1152:
- vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
- break;
- case 1280:
- if (mode->crtc_vdisplay == 800)
- vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
- break;
- case 1360:
- vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
- break;
- case 1440:
- vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
- break;
- case 1600:
- if (mode->crtc_vdisplay == 900)
- vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
- break;
- case 1680:
- vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
- break;
- case 1920:
- if (mode->crtc_vdisplay == 1080)
- vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
- break;
- default:
- return false;
- }
-
- refresh_rate = drm_mode_vrefresh(mode);
- check_sync = vbios_mode->enh_table->flags & WideScreenMode;
-
- while (1) {
- const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
-
- while (loop->refresh_rate != 0xff) {
- if ((check_sync) &&
- (((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
- (loop->flags & PVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
- (loop->flags & NVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
- (loop->flags & PHSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
- (loop->flags & NHSync)))) {
- loop++;
- continue;
- }
- if (loop->refresh_rate <= refresh_rate
- && (!best || loop->refresh_rate > best->refresh_rate))
- best = loop;
- loop++;
- }
- if (best || !check_sync)
- break;
- check_sync = 0;
- }
-
- if (best)
- vbios_mode->enh_table = best;
-
- hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
- vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
-
- adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
- adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
- adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
- adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp;
- adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp +
- vbios_mode->enh_table->hsync);
-
- adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
- adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
- adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
- adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp;
- adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp +
- vbios_mode->enh_table->vsync);
-
- return true;
-}
-
static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 color_index;
@@ -256,7 +167,7 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x92, format->cpp[0] * 8);
}
@@ -264,19 +175,19 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 refresh_rate_index, mode_id;
- refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
- mode_id = vbios_mode->enh_table->mode_id;
+ refresh_rate_index = vmode->refresh_rate_index;
+ mode_id = vmode->mode_id;
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x94, adjusted_mode->crtc_hdisplay);
@@ -288,14 +199,11 @@ static void ast_set_vbios_mode_reg(struct ast_device *ast,
static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_stdtable *stdtable)
{
- const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
- stdtable = vbios_mode->std_table;
-
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
@@ -334,16 +242,15 @@ static void ast_set_std_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_device *ast,
- struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode,
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
- u16 temp, precache = 0;
+ u16 temp;
+ unsigned char crtc_hsync_precatch = 0;
- if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
- (vbios_mode->enh_table->flags & AST2500PreCatchCRT))
- precache = 40;
+ if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT))
+ crtc_hsync_precatch = 40;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
@@ -369,12 +276,12 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jregAD |= 0x01; /* HBE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f));
- temp = ((mode->crtc_hsync_start-precache) >> 3) - 1;
+ temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x40; /* HRS D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp);
- temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f;
+ temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f;
if (temp & 0x20)
jregAD |= 0x04; /* HRE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
@@ -382,8 +289,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD);
- // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
- if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
+ if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00);
@@ -441,7 +347,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80));
- if (precache)
+ if (crtc_hsync_precatch)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00);
@@ -461,14 +367,9 @@ static void ast_set_offset_reg(struct ast_device *ast,
static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
- const struct ast_vbios_dclk_info *clk_info;
-
- if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
- clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
- else
- clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+ const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -508,33 +409,24 @@ static void ast_set_color_reg(struct ast_device *ast,
static void ast_set_crtthd_reg(struct ast_device *ast)
{
- /* Set Threshold */
- if (IS_AST_GEN7(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0);
- } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60);
- } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f);
- } else {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f);
- }
+ u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low;
+ u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6);
}
static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_VGAMR_R);
jreg &= ~0xC0;
- if (vbios_mode->enh_table->flags & NVSync)
+ if (vmode->flags & NVSync)
jreg |= 0x80;
- if (vbios_mode->enh_table->flags & NHSync)
+ if (vmode->flags & NHSync)
jreg |= 0x40;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
}
@@ -565,17 +457,16 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
* Planes
*/
-static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type)
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type)
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -584,6 +475,13 @@ static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -630,7 +528,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -654,15 +552,19 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!old_fb || (fb->format != old_fb->format) || crtc_state->mode_changed) {
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
ast_set_color_reg(ast, fb->format);
- ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
+ ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode);
}
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
+ /* if the buffer comes from another device */
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE) == 0) {
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
+ }
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
/*
@@ -704,12 +606,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -736,13 +638,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -757,263 +657,6 @@ static int ast_primary_plane_init(struct ast_device *ast)
}
/*
- * Cursor plane
- */
-
-static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
-{
- union {
- u32 ul;
- u8 b[4];
- } srcdata32[2], data32;
- union {
- u16 us;
- u8 b[2];
- } data16;
- u32 csum = 0;
- s32 alpha_dst_delta, last_alpha_dst_delta;
- u8 __iomem *dstxor;
- const u8 *srcxor;
- int i, j;
- u32 per_pixel_copy, two_pixel_copy;
-
- alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
- last_alpha_dst_delta = alpha_dst_delta - (width << 1);
-
- srcxor = src;
- dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
- per_pixel_copy = width & 1;
- two_pixel_copy = width >> 1;
-
- for (j = 0; j < height; j++) {
- for (i = 0; i < two_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
- data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
-
- writel(data32.ul, dstxor);
- csum += data32.ul;
-
- dstxor += 4;
- srcxor += 8;
-
- }
-
- for (i = 0; i < per_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- writew(data16.us, dstxor);
- csum += (u32)data16.us;
-
- dstxor += 2;
- srcxor += 4;
- }
- dstxor += last_alpha_dst_delta;
- }
-
- /* write checksum + signature */
- dst += AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-}
-
-static void ast_set_cursor_base(struct ast_device *ast, u64 address)
-{
- u8 addr0 = (address >> 3) & 0xff;
- u8 addr1 = (address >> 11) & 0xff;
- u8 addr2 = (address >> 19) & 0xff;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
-}
-
-static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
- u8 x_offset, u8 y_offset)
-{
- u8 x0 = (x & 0x00ff);
- u8 x1 = (x & 0x0f00) >> 8;
- u8 y0 = (y & 0x00ff);
- u8 y1 = (y & 0x0700) >> 8;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
-}
-
-static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
-{
- static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
- AST_IO_VGACRCB_HWC_ENABLED);
-
- u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
-
- if (enabled)
- vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
-
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
-}
-
-static const uint32_t ast_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc_state *new_crtc_state = NULL;
- int ret;
-
- if (new_plane_state->crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret || !new_plane_state->visible)
- return ret;
-
- if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
- return -EINVAL;
-
- return 0;
-}
-
-static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_plane *ast_plane = to_ast_plane(plane);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_device *ast = to_ast_device(plane->dev);
- struct iosys_map src_map = shadow_plane_state->data[0];
- struct drm_rect damage;
- const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
- u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
- unsigned int offset_x, offset_y;
- u16 x, y;
- u8 x_offset, y_offset;
-
- /*
- * Do data transfer to hardware buffer and point the scanout
- * engine to the offset.
- */
-
- if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
- ast_update_cursor_image(dst, src, fb->width, fb->height);
- ast_set_cursor_base(ast, dst_off);
- }
-
- /*
- * Update location in HWC signature and registers.
- */
-
- writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
- writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
-
- offset_x = AST_MAX_HWC_WIDTH - fb->width;
- offset_y = AST_MAX_HWC_HEIGHT - fb->height;
-
- if (plane_state->crtc_x < 0) {
- x_offset = (-plane_state->crtc_x) + offset_x;
- x = 0;
- } else {
- x_offset = offset_x;
- x = plane_state->crtc_x;
- }
- if (plane_state->crtc_y < 0) {
- y_offset = (-plane_state->crtc_y) + offset_y;
- y = 0;
- } else {
- y_offset = offset_y;
- y = plane_state->crtc_y;
- }
-
- ast_set_cursor_location(ast, x, y, x_offset, y_offset);
-
- /* Dummy write to enable HWC and make the HW pick-up the changes. */
- ast_set_cursor_enabled(ast, true);
-}
-
-static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_device *ast = to_ast_device(plane->dev);
-
- ast_set_cursor_enabled(ast, false);
-}
-
-static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ast_cursor_plane_helper_atomic_check,
- .atomic_update = ast_cursor_plane_helper_atomic_update,
- .atomic_disable = ast_cursor_plane_helper_atomic_disable,
-};
-
-static const struct drm_plane_funcs ast_cursor_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
-};
-
-static int ast_cursor_plane_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
- struct drm_plane *cursor_plane = &ast_cursor_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
- int ret;
-
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
-
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
- 0x01, &ast_cursor_plane_funcs,
- ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR);
- if (ret) {
- drm_err(dev, "ast_plane_init() failed: %d\n", ret);
- return ret;
- }
- drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
- drm_plane_enable_fb_damage_clips(cursor_plane);
-
- ast->vram_fb_available -= size;
-
- return 0;
-}
-
-/*
* CRTC
*/
@@ -1021,72 +664,13 @@ static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
- enum drm_mode_status status;
- uint32_t jtemp;
-
- if (ast->support_wide_screen) {
- if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
- return MODE_OK;
- if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
- return MODE_OK;
- if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
- return MODE_OK;
- if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
- return MODE_OK;
-
- if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
- (ast->chip == AST2200) || // GEN3, but not AST2150 (?)
- IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
- IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
- return MODE_OK;
-
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
- jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (jtemp & 0x01)
- return MODE_NOMODE;
- else
- return MODE_OK;
- }
- }
- }
+ const struct ast_vbios_enhtable *vmode;
- status = MODE_NOMODE;
+ vmode = ast_vbios_find_mode(ast, mode);
+ if (!vmode)
+ return MODE_NOMODE;
- switch (mode->hdisplay) {
- case 640:
- if (mode->vdisplay == 480)
- status = MODE_OK;
- break;
- case 800:
- if (mode->vdisplay == 600)
- status = MODE_OK;
- break;
- case 1024:
- if (mode->vdisplay == 768)
- status = MODE_OK;
- break;
- case 1152:
- if (mode->vdisplay == 864)
- status = MODE_OK;
- break;
- case 1280:
- if (mode->vdisplay == 1024)
- status = MODE_OK;
- break;
- case 1600:
- if (mode->vdisplay == 1200)
- status = MODE_OK;
- break;
- default:
- break;
- }
-
- return status;
+ return MODE_OK;
}
static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
@@ -1095,8 +679,8 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = crtc->state;
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info =
- &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table = ast_crtc_state->std_table;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
/*
@@ -1107,25 +691,29 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
*/
ast_wait_for_vretrace(ast);
- ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_vbios_mode_reg(ast, adjusted_mode, vmode);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
- ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_std_reg(ast, adjusted_mode, std_table);
+ ast_set_crtc_reg(ast, adjusted_mode, vmode);
+ ast_set_dclk_reg(ast, adjusted_mode, vmode);
ast_set_crtthd_reg(ast);
- ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_sync_reg(ast, adjusted_mode, vmode);
}
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
- bool succ;
+ const struct ast_vbios_enhtable *vmode;
+ unsigned int hborder = 0;
+ unsigned int vborder = 0;
int ret;
if (!crtc_state->enable)
@@ -1157,11 +745,56 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
}
- succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
- &crtc_state->adjusted_mode,
- &ast_state->vbios_mode_info);
- if (!succ)
+ /*
+ * Set register tables.
+ *
+ * TODO: These tables mix all kinds of fields and should
+ * probably be resolved into various helper functions.
+ */
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ ast_state->std_table = &vbios_stdtable[VGAModeIndex];
+ break;
+ case DRM_FORMAT_RGB565:
+ ast_state->std_table = &vbios_stdtable[HiCModeIndex];
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ ast_state->std_table = &vbios_stdtable[TrueCModeIndex];
+ break;
+ default:
return -EINVAL;
+ }
+
+ /*
+ * Find the VBIOS mode and adjust the DRM display mode accordingly
+ * if a full modeset is required. Otherwise keep the existing values.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ vmode = ast_vbios_find_mode(ast, &crtc_state->mode);
+ if (!vmode)
+ return -EINVAL;
+ ast_state->vmode = vmode;
+
+ if (vmode->flags & HBorder)
+ hborder = 8;
+ if (vmode->flags & VBorder)
+ vborder = 8;
+
+ adjusted_mode->crtc_hdisplay = vmode->hde;
+ adjusted_mode->crtc_hblank_start = vmode->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vmode->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vmode->hde + hborder + vmode->hfp;
+ adjusted_mode->crtc_hsync_end = vmode->hde + hborder + vmode->hfp + vmode->hsync;
+ adjusted_mode->crtc_htotal = vmode->ht;
+
+ adjusted_mode->crtc_vdisplay = vmode->vde;
+ adjusted_mode->crtc_vblank_start = vmode->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vmode->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vmode->vde + vborder + vmode->vfp;
+ adjusted_mode->crtc_vsync_end = vmode->vde + vborder + vmode->vfp + vmode->vsync;
+ adjusted_mode->crtc_vtotal = vmode->vt;
+ }
return 0;
}
@@ -1182,33 +815,35 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
- ast_crtc_set_gamma(ast,
- ast_crtc_state->format,
- crtc_state->gamma_lut->data);
+ ast_crtc_load_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ ast_crtc_fill_gamma(ast, ast_crtc_state->format);
}
}
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(crtc->dev);
+ u8 vgacr17 = 0x00;
+ u8 vgacrb6 = 0xff;
+
+ vgacr17 |= AST_IO_VGACR17_SYNC_ENABLE;
+ vgacrb6 &= ~(AST_IO_VGACRB6_VSYNC_OFF | AST_IO_VGACRB6_HSYNC_OFF);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0x00);
- ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x00);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6);
}
static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_device *ast = to_ast_device(crtc->dev);
- u8 vgacrb6;
-
- ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, AST_IO_VGASR1_SD);
+ u8 vgacr17 = 0xff;
- vgacrb6 = AST_IO_VGACRB6_VSYNC_OFF |
- AST_IO_VGACRB6_HSYNC_OFF;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6);
+ vgacr17 &= ~AST_IO_VGACR17_SYNC_ENABLE;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17);
/*
* HW cursors require the underlying primary plane and CRTC to
@@ -1263,8 +898,8 @@ ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
ast_state = to_ast_crtc_state(crtc->state);
new_ast_state->format = ast_state->format;
- memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
- sizeof(new_ast_state->vbios_mode_info));
+ new_ast_state->std_table = ast_state->std_table;
+ new_ast_state->vmode = ast_state->vmode;
return &new_ast_state->base;
}
@@ -1294,7 +929,7 @@ static int ast_crtc_init(struct ast_device *ast)
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
- &ast->cursor_plane.base, &ast_crtc_funcs,
+ &ast->cursor_plane.base.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
@@ -1317,9 +952,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -1333,16 +968,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+
+ if (drm_WARN_ON_ONCE(dev, !info))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -1373,12 +1012,7 @@ int ast_mode_config_init(struct ast_device *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
- if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
- ast->chip == AST2200 || // GEN3, but not AST2150 (?)
- IS_AST_GEN7(ast) ||
- IS_AST_GEN6(ast) ||
- IS_AST_GEN5(ast) ||
- IS_AST_GEN4(ast)) {
+ if (ast->support_fullhd) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
} else {
@@ -1418,10 +1052,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 364030f97571..b72914dbed38 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -31,51 +31,10 @@
#include <drm/drm_print.h>
-#include "ast_dram_tables.h"
#include "ast_drv.h"
+#include "ast_post.h"
-static void ast_post_chip_2300(struct ast_device *ast);
-static void ast_post_chip_2500(struct ast_device *ast);
-
-static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
-
-static void ast_set_def_ext_reg(struct ast_device *ast)
-{
- u8 i, index, reg;
- const u8 *ext_reg_info;
-
- /* reset scratch */
- for (i = 0x81; i <= 0x9f; i++)
- ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- ext_reg_info = extreginfo_ast2300;
- else
- ext_reg_info = extreginfo;
-
- index = 0xa0;
- while (*ext_reg_info != 0xff) {
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
- index++;
- ext_reg_info++;
- }
-
- /* disable standard IO/MEM decode if secondary */
- /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
-
- /* Set Ext. Default */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
-
- /* Enable RAMDAC for A1 */
- reg = 0x04;
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- reg |= 0x20;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
-}
-
-static u32 __ast_mindwm(void __iomem *regs, u32 r)
+u32 __ast_mindwm(void __iomem *regs, u32 r)
{
u32 data;
@@ -89,7 +48,7 @@ static u32 __ast_mindwm(void __iomem *regs, u32 r)
return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
u32 data;
@@ -113,309 +72,38 @@ void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
__ast_moutdwm(ast->regs, r, v);
}
-/*
- * AST2100/2150 DLL CBR Setting
- */
-#define CBR_SIZE_AST2150 ((16 << 10) - 1)
-#define CBR_PASSNUM_AST2150 5
-#define CBR_THRESHOLD_AST2150 10
-#define CBR_THRESHOLD2_AST2150 10
-#define TIMEOUT_AST2150 5000000
-
-#define CBR_PATNUM_AST2150 8
-
-static const u32 pattern_AST2150[14] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0xFFFE0001,
- 0x683501FE,
- 0x0F1929B0,
- 0x2D0B4346,
- 0x60767F02,
- 0x6FBE36A6,
- 0x3A253035,
- 0x3019686D,
- 0x41C6167E,
- 0x620152BF,
- 0x20F050E0
-};
-
-static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
+int ast_post_gpu(struct ast_device *ast)
{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-#endif
-
-static int cbrtest_ast2150(struct ast_device *ast)
-{
- int i;
-
- for (i = 0; i < 8; i++)
- if (mmctestburst2_ast2150(ast, i))
- return 0;
- return 1;
-}
+ int ret;
-static int cbrscan_ast2150(struct ast_device *ast, int busw)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
- if (cbrtest_ast2150(ast))
- break;
- }
- if (loop == CBR_PASSNUM_AST2150)
- return 0;
- }
- return 1;
-}
-
-
-static void cbrdlli_ast2150(struct ast_device *ast, int busw)
-{
- u32 dll_min[4], dll_max[4], dlli, data, passcnt;
-
-cbr_start:
- dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
- dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
- passcnt = 0;
-
- for (dlli = 0; dlli < 100; dlli++) {
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
- data = cbrscan_ast2150(ast, busw);
- if (data != 0) {
- if (data & 0x1) {
- if (dll_min[0] > dlli)
- dll_min[0] = dlli;
- if (dll_max[0] < dlli)
- dll_max[0] = dlli;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD_AST2150)
- goto cbr_start;
- }
- if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
- goto cbr_start;
-
- dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
-}
-
-
-
-static void ast_init_dram_reg(struct ast_device *ast)
-{
- u8 j;
- u32 data, temp, i;
- const struct ast_dramstruct *dram_reg_info;
-
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-
- if ((j & 0x80) == 0) { /* VGA only */
- if (IS_AST_GEN1(ast)) {
- dram_reg_info = ast2000_dram_table_data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x10100, 0xa8);
-
- do {
- ;
- } while (ast_read32(ast, 0x10100) != 0xa8);
- } else { /* GEN2/GEN3 */
- if (ast->chip == AST2100 || ast->chip == AST2200)
- dram_reg_info = ast2100_dram_table_data;
- else
- dram_reg_info = ast1100_dram_table_data;
-
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688A8A8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x01);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x01);
- }
-
- while (dram_reg_info->index != 0xffff) {
- if (dram_reg_info->index == 0xff00) {/* delay fn */
- for (i = 0; i < 15; i++)
- udelay(dram_reg_info->data);
- } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
- data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
- data = 0x00000c8d;
-
- temp = ast_read32(ast, 0x12070);
- temp &= 0xc;
- temp <<= 2;
- ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
- } else
- ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
- dram_reg_info++;
- }
-
- /* AST 2100/2150 DRAM calibration */
- data = ast_read32(ast, 0x10120);
- if (data == 0x5061) { /* 266Mhz */
- data = ast_read32(ast, 0x10004);
- if (data & 0x40)
- cbrdlli_ast2150(ast, 16); /* 16 bits */
- else
- cbrdlli_ast2150(ast, 32); /* 32 bits */
- }
-
- switch (AST_GEN(ast)) {
- case 1:
- temp = ast_read32(ast, 0x10140);
- ast_write32(ast, 0x10140, temp | 0x40);
- break;
- case 2:
- case 3:
- temp = ast_read32(ast, 0x1200c);
- ast_write32(ast, 0x1200c, temp & 0xfffffffd);
- temp = ast_read32(ast, 0x12040);
- ast_write32(ast, 0x12040, temp | 0x40);
- break;
- default:
- break;
- }
+ if (AST_GEN(ast) >= 7) {
+ ret = ast_2600_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 6) {
+ ret = ast_2500_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 4) {
+ ret = ast_2300_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 2) {
+ ret = ast_2100_post(ast);
+ if (ret)
+ return ret;
+ } else {
+ ret = ast_2000_post(ast);
+ if (ret)
+ return ret;
}
- /* wait ready */
- do {
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((j & 0x40) == 0);
-}
-
-void ast_post_gpu(struct ast_device *ast)
-{
- ast_set_def_ext_reg(ast);
-
- if (IS_AST_GEN7(ast)) {
- if (ast->tx_chip == AST_TX_ASTDP)
- ast_dp_launch(ast);
- } else if (ast->config_mode == ast_use_p2a) {
- if (IS_AST_GEN6(ast))
- ast_post_chip_2500(ast);
- else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
- ast_post_chip_2300(ast);
- else
- ast_init_dram_reg(ast);
-
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164)
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */
- }
+ return 0;
}
-/* AST 2300 DRAM settings */
-#define AST_DDR3 0
-#define AST_DDR2 1
-
-struct ast2300_dram_param {
- u32 dram_type;
- u32 dram_chipid;
- u32 dram_freq;
- u32 vram_size;
- u32 odt;
- u32 wodt;
- u32 rodt;
- u32 dram_config;
- u32 reg_PERIOD;
- u32 reg_MADJ;
- u32 reg_SADJ;
- u32 reg_MRS;
- u32 reg_EMRS;
- u32 reg_AC1;
- u32 reg_AC2;
- u32 reg_DQSIC;
- u32 reg_DRV;
- u32 reg_IOZ;
- u32 reg_DQIDLY;
- u32 reg_FREQ;
- u32 madj_max;
- u32 dll2_finetune_step;
-};
-
-/*
- * DQSI DLL CBR Setting
- */
-#define CBR_SIZE0 ((1 << 10) - 1)
-#define CBR_SIZE1 ((4 << 10) - 1)
-#define CBR_SIZE2 ((64 << 10) - 1)
-#define CBR_PASSNUM 5
-#define CBR_PASSNUM2 5
-#define CBR_THRESHOLD 10
-#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
-#define CBR_PATNUM 8
-
-static const u32 pattern[8] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0x88778877,
- 0x92CC4D6E,
- 0x543D3CDE,
- 0xF1E843C7,
- 0x7C61D253
-};
-static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -435,1657 +123,7 @@ static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
- if (++timeout > TIMEOUT) {
- ast_moutdwm(ast, 0x1e6e0070, 0x0);
- return 0xffffffff;
- }
- } while (!data);
- data = ast_mindwm(ast, 0x1e6e0078);
- data = (data | (data >> 16)) & 0xffff;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-
-static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
+bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-
-static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x41);
-}
-
-static bool mmc_test_single(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0xc5);
-}
-
-static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x05);
-}
-
-static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0x85);
-}
-
-static int cbr_test(struct ast_device *ast)
-{
- u32 data;
- int i;
- data = mmc_test_single2(ast, 0);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- for (i = 0; i < 8; i++) {
- data = mmc_test_burst2(ast, i);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- }
- if (!data)
- return 3;
- else if (data & 0xff)
- return 2;
- return 1;
-}
-
-static int cbr_scan(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 3;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static u32 cbr_test2(struct ast_device *ast)
-{
- u32 data;
-
- data = mmc_test_burst2(ast, 0);
- if (data == 0xffff)
- return 0;
- data |= mmc_test_single2(ast, 0);
- if (data == 0xffff)
- return 0;
-
- return ~data & 0xffff;
-}
-
-static u32 cbr_scan2(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 0xffff;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test2(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static bool cbr_test3(struct ast_device *ast)
-{
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single(ast, 0))
- return false;
- return true;
-}
-
-static bool cbr_scan3(struct ast_device *ast)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < 2; loop++) {
- if (cbr_test3(ast))
- break;
- }
- if (loop == 2)
- return false;
- }
- return true;
-}
-
-static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
- bool status = false;
-FINETUNE_START:
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD2) {
- break;
- }
- }
- gold_sadj[0] = 0x0;
- passcnt = 0;
- for (cnt = 0; cnt < 16; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- gold_sadj[0] += dllmin[cnt];
- passcnt++;
- }
- }
- if (retry++ > 10)
- goto FINETUNE_DONE;
- if (passcnt != 16) {
- goto FINETUNE_START;
- }
- status = true;
-FINETUNE_DONE:
- gold_sadj[0] = gold_sadj[0] >> 4;
- gold_sadj[1] = gold_sadj[0];
-
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[0] >= dlli) {
- dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- }
- } else {
- dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0080, data);
-
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[1] >= dlli) {
- dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- } else {
- dlli = (dlli - 1) & 0x7;
- }
- } else {
- dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
- dlli += 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0084, data);
- return status;
-} /* finetuneDQI_L */
-
-static void finetuneDQSI(struct ast_device *ast)
-{
- u32 dlli, dqsip, dqidly;
- u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
- u32 g_dqidly, g_dqsip, g_margin, g_side;
- u16 pass[32][2][2];
- char tag[2][76];
-
- /* Disable DQI CBR */
- reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
- reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
- reg_mcr18 &= 0x0000ffff;
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
- for (dlli = 0; dlli < 76; dlli++) {
- tag[0][dlli] = 0x0;
- tag[1][dlli] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- pass[dqidly][0][0] = 0xff;
- pass[dqidly][0][1] = 0x0;
- pass[dqidly][1][0] = 0xff;
- pass[dqidly][1][1] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- passcnt[0] = passcnt[1] = 0;
- for (dqsip = 0; dqsip < 2; dqsip++) {
- ast_moutdwm(ast, 0x1E6E000C, 0);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
- ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0070, 0);
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
- if (cbr_scan3(ast)) {
- if (dlli == 0)
- break;
- passcnt[dqsip]++;
- tag[dqsip][dlli] = 'P';
- if (dlli < pass[dqidly][dqsip][0])
- pass[dqidly][dqsip][0] = (u16) dlli;
- if (dlli > pass[dqidly][dqsip][1])
- pass[dqidly][dqsip][1] = (u16) dlli;
- } else if (passcnt[dqsip] >= 5)
- break;
- else {
- pass[dqidly][dqsip][0] = 0xff;
- pass[dqidly][dqsip][1] = 0x0;
- }
- }
- }
- if (passcnt[0] == 0 && passcnt[1] == 0)
- dqidly++;
- }
- /* Search margin */
- g_dqidly = g_dqsip = g_margin = g_side = 0;
-
- for (dqidly = 0; dqidly < 32; dqidly++) {
- for (dqsip = 0; dqsip < 2; dqsip++) {
- if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
- continue;
- diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
- if ((diff+2) < g_margin)
- continue;
- passcnt[0] = passcnt[1] = 0;
- for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
- for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
- if (passcnt[0] > passcnt[1])
- passcnt[0] = passcnt[1];
- passcnt[1] = 0;
- if (passcnt[0] > g_side)
- passcnt[1] = passcnt[0] - g_side;
- if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- } else if (passcnt[1] > 1 && g_side < 8) {
- if (diff > g_margin)
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- }
- }
- }
- reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
-}
-static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
- bool status = false;
-
- finetuneDQSI(ast);
- if (finetuneDQI_L(ast, param) == false)
- return status;
-
-CBR_START2:
- dllmin[0] = dllmin[1] = 0xff;
- dllmax[0] = dllmax[1] = 0x0;
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
- data = cbr_scan(ast);
- if (data != 0) {
- if (data & 0x1) {
- if (dllmin[0] > dlli) {
- dllmin[0] = dlli;
- }
- if (dllmax[0] < dlli) {
- dllmax[0] = dlli;
- }
- }
- if (data & 0x2) {
- if (dllmin[1] > dlli) {
- dllmin[1] = dlli;
- }
- if (dllmax[1] < dlli) {
- dllmax[1] = dlli;
- }
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD) {
- break;
- }
- }
- if (retry++ > 10)
- goto CBR_DONE2;
- if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- status = true;
-CBR_DONE2:
- dlli = (dllmin[1] + dllmax[1]) >> 1;
- dlli <<= 8;
- dlli += (dllmin[0] + dllmax[0]) >> 1;
- ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
- return status;
-} /* CBRDLL2 */
-
-static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = 0x00020000 + (trap << 16);
- trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
- trap_MRS = 0x00000010 + (trap << 4);
- trap_MRS |= ((trap & 0x2) << 18);
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 0;
- param->reg_AC1 = 0x22202725;
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x04001400 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA00761C | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA007636 | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xCD44961A;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00081830;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 4;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0270);
- param->wodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xDE44A61D;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x070000BB;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 4;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0290);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xEF44B61E;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000088;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302A37;
- param->reg_AC2 = 0xEF56B61E;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 136;
- param->dll2_finetune_step = 3;
- break;
- case 600:
- ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xDF56B61F;
- param->reg_DQSIC = 0x0000014D;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000058C0;
- param->madj_max = 132;
- param->dll2_finetune_step = 3;
- break;
- case 624:
- ast_moutdwm(ast, 0x1E6E2020, 0x0160);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xEF56B621;
- param->reg_DQSIC = 0x0000015A;
- param->reg_MRS = 0x02101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000059C0;
- param->madj_max = 128;
- param->dll2_finetune_step = 3;
- break;
- } /* switch freq */
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x130;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x131;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x132;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x133;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case AST_VIDMEM_SIZE_8M:
- param->dram_config |= 0x00;
- break;
- case AST_VIDMEM_SIZE_16M:
- param->dram_config |= 0x04;
- break;
- case AST_VIDMEM_SIZE_32M:
- param->dram_config |= 0x08;
- break;
- case AST_VIDMEM_SIZE_64M:
- param->dram_config |= 0x0c;
- break;
- }
-
-}
-
-static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr3_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
- ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- data = 0;
- if (param->wodt) {
- data = 0x300;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr3_init_start;
-
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-
-}
-
-static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = (trap << 20) | (trap << 16);
- trap_AC2 += 0x00110000;
- trap_MRS = 0x00000040 | (trap << 4);
-
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 264:
- ast_moutdwm(ast, 0x1E6E2020, 0x0130);
- param->wodt = 0;
- param->reg_AC1 = 0x11101513;
- param->reg_AC2 = 0x78117011;
- param->reg_DQSIC = 0x00000092;
- param->reg_MRS = 0x00000842;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x000000F0;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x0000005A;
- param->reg_FREQ = 0x00004AC0;
- param->madj_max = 138;
- param->dll2_finetune_step = 3;
- break;
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 1;
- param->reg_AC1 = 0x22202613;
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x00000A02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xAA009012 | trap_AC2;
- break;
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA009023 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA00903B | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xCD44B01E;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 3;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0261);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xDE44C022;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 3;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0120);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xEF44D024;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F9;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A7;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 552:
- ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E025;
- param->reg_DQSIC = 0x00000132;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000AD;
- param->reg_FREQ = 0x000056C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E027;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000B3;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- }
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x100;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x121;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x122;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x123;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case AST_VIDMEM_SIZE_8M:
- param->dram_config |= 0x00;
- break;
- case AST_VIDMEM_SIZE_16M:
- param->dram_config |= 0x04;
- break;
- case AST_VIDMEM_SIZE_32M:
- param->dram_config |= 0x08;
- break;
- case AST_VIDMEM_SIZE_64M:
- param->dram_config |= 0x0c;
- break;
- }
-}
-
-static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr2_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
- ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
- ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
-
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
- data = 0;
- if (param->wodt) {
- data = 0x500;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr2_init_start;
-
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-}
-
-static void ast_post_chip_2300(struct ast_device *ast)
-{
- struct ast2300_dram_param param;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & 0x80) == 0) {/* vga only */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688a8a8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x1);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x1);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- param.dram_freq = 396;
- param.dram_type = AST_DDR3;
- temp = ast_mindwm(ast, 0x1e6e2070);
- if (temp & 0x01000000)
- param.dram_type = AST_DDR2;
- switch (temp & 0x18000000) {
- case 0:
- param.dram_chipid = AST_DRAM_512Mx16;
- break;
- default:
- case 0x08000000:
- param.dram_chipid = AST_DRAM_1Gx16;
- break;
- case 0x10000000:
- param.dram_chipid = AST_DRAM_2Gx16;
- break;
- case 0x18000000:
- param.dram_chipid = AST_DRAM_4Gx16;
- break;
- }
- switch (temp & 0x0c) {
- default:
- case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
- break;
-
- case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
- break;
-
- case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
- break;
-
- case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
- break;
- }
-
- if (param.dram_type == AST_DDR3) {
- get_ddr3_info(ast, &param);
- ddr3_init(ast, &param);
- } else {
- get_ddr2_info(ast, &param);
- ddr2_init(ast, &param);
- }
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
-
-static bool cbr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static bool ddr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_burst(ast, 1))
- return false;
- if (!mmc_test_burst(ast, 2))
- return false;
- if (!mmc_test_burst(ast, 3))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static void ddr_init_common_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
- ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
- ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
- ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
- ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
- ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
-}
-
-static void ddr_phy_init_2500(struct ast_device *ast)
-{
- u32 data, pass, timecnt;
-
- pass = 0;
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- while (!pass) {
- for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
- data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
- if (!data)
- break;
- }
- if (timecnt != TIMEOUT) {
- data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
- if (!data)
- pass = 1;
- }
- if (!pass) {
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- udelay(10); /* delay 10 us */
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- }
- }
-
- ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
-}
-
-/*
- * Check DRAM Size
- * 1Gb : 0x80000000 ~ 0x87FFFFFF
- * 2Gb : 0x80000000 ~ 0x8FFFFFFF
- * 4Gb : 0x80000000 ~ 0x9FFFFFFF
- * 8Gb : 0x80000000 ~ 0xBFFFFFFF
- */
-static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
-{
- u32 reg_04, reg_14;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
- reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
-
- ast_moutdwm(ast, 0xA0100000, 0x41424344);
- ast_moutdwm(ast, 0x90100000, 0x35363738);
- ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
- ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
-
- /* Check 8Gbit */
- if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
- reg_04 |= 0x03;
- reg_14 |= (tRFC >> 24) & 0xFF;
- /* Check 4Gbit */
- } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
- reg_04 |= 0x02;
- reg_14 |= (tRFC >> 16) & 0xFF;
- /* Check 2Gbit */
- } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
- reg_04 |= 0x01;
- reg_14 |= (tRFC >> 8) & 0xFF;
- } else {
- reg_14 |= tRFC & 0xFF;
- }
- ast_moutdwm(ast, 0x1E6E0004, reg_04);
- ast_moutdwm(ast, 0x1E6E0014, reg_14);
-}
-
-static void enable_cache_2500(struct ast_device *ast)
-{
- u32 reg_04, data;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004);
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
-
- do
- data = ast_mindwm(ast, 0x1E6E0004);
- while (!(data & 0x80000));
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
-}
-
-static void set_mpll_2500(struct ast_device *ast)
-{
- u32 addr, data, param;
-
- /* Reset MMC */
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
- ast_moutdwm(ast, addr, 0x0);
- addr += 4;
- }
- ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
- if (data) {
- /* CLKIN = 25MHz */
- param = 0x930023E0;
- ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
- } else {
- /* CLKIN = 24MHz */
- param = 0x93002400;
- }
- ast_moutdwm(ast, 0x1E6E2020, param);
- udelay(100);
-}
-
-static void reset_mmc_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E78505C, 0x00000004);
- ast_moutdwm(ast, 0x1E785044, 0x00000001);
- ast_moutdwm(ast, 0x1E785048, 0x00004755);
- ast_moutdwm(ast, 0x1E78504C, 0x00000013);
- mdelay(100);
- ast_moutdwm(ast, 0x1E785054, 0x00000077);
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
-}
-
-static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
-
-static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
- u32 data, data2, pass, retrycnt;
- u32 ddr_vref, phy_vref;
- u32 min_ddr_vref = 0, min_phy_vref = 0;
- u32 max_ddr_vref = 0, max_phy_vref = 0;
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
- ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
-
- /* Train PHY Vref first */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- max_phy_vref = 0x0;
- pass = 0;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
- for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- data = ast_mindwm(ast, 0x1E6E03D0);
- data2 = data >> 8;
- data = data & 0xff;
- if (data > data2)
- data = data2;
- if (max_phy_vref < data) {
- max_phy_vref = data;
- min_phy_vref = phy_vref;
- }
- } else if (pass > 0)
- break;
- }
- }
- ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
-
- /* Train DDR Vref next */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- min_ddr_vref = 0xFF;
- max_ddr_vref = 0x0;
- pass = 0;
- for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- if (min_ddr_vref > ddr_vref)
- min_ddr_vref = ddr_vref;
- if (max_ddr_vref < ddr_vref)
- max_ddr_vref = ddr_vref;
- } else if (pass != 0)
- break;
- }
- }
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
-
-static bool ast_dram_init_2500(struct ast_device *ast)
-{
- u32 data;
- u32 max_tries = 5;
-
- do {
- if (max_tries-- == 0)
- return false;
- set_mpll_2500(ast);
- reset_mmc_2500(ast);
- ddr_init_common_2500(ast);
-
- data = ast_mindwm(ast, 0x1E6E2070);
- if (data & 0x01000000)
- ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
- else
- ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
- } while (!ddr_test_2500(ast));
-
- ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
-
- /* Patch code */
- data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
- ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
-
- return true;
-}
-
-void ast_patch_ahb_2500(void __iomem *regs)
-{
- u32 data;
-
- /* Clear bus lock condition */
- __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
- __ast_moutdwm(regs, 0x1e600084, 0x00010000);
- __ast_moutdwm(regs, 0x1e600088, 0x00000000);
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
-
- data = __ast_mindwm(regs, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
- /*
-		 * If "Fast reset" is enabled for the ARM-ICE debugger,
-		 * then the WDT needs to be enabled, where
-		 * WDT04 is the WDT#1 Reload reg.
- * WDT08 is WDT#1 counter restart reg to avoid system deadlock
- * WDT0C is WDT#1 control reg
- * [6:5]:= 01:Full chip
- * [4]:= 1:1MHz clock source
-		 * [1]:= 1:WDT will be cleared and disabled after timeout occurs
- * [0]:= 1:WDT enable
- */
- __ast_moutdwm(regs, 0x1E785004, 0x00000010);
- __ast_moutdwm(regs, 0x1E785008, 0x00004755);
- __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
- udelay(1000);
- }
-
- do {
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
- data = __ast_mindwm(regs, 0x1e6e2000);
- } while (data != 1);
-
- __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
-}
-
-void ast_post_chip_2500(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
- /* Clear bus lock condition */
- ast_patch_ahb_2500(ast->regs);
-
- /* Disable watchdog */
- ast_moutdwm(ast, 0x1E78502C, 0x00000000);
- ast_moutdwm(ast, 0x1E78504C, 0x00000000);
-
- /*
- * Reset USB port to patch USB unknown device issue
- * SCU90 is Multi-function Pin Control #5
-		 * [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 Hub
-		 * port).
- * SCU94 is Multi-function Pin Control #6
- * [14:13]:= 1x:USB2.0 Host2 controller
- * SCU70 is Hardware Strap reg
- * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
- * [18]: 0(24)/1(48) MHz)
- * SCU7C is Write clear reg to SCU70
-		 * [23]:= write 1 and then SCU70[23] will be cleared to 0.
- */
- ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
- ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
- if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
- ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
- mdelay(100);
- ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
- }
- /* Modify eSPI reset pin */
- temp = ast_mindwm(ast, 0x1E6E2070);
- if (temp & 0x02000000)
- ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- if (!ast_dram_init_2500(ast))
- drm_err(dev, "DRAM init failed !\n");
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
diff --git a/drivers/gpu/drm/ast/ast_post.h b/drivers/gpu/drm/ast/ast_post.h
new file mode 100644
index 000000000000..aa5d247bebe8
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef AST_POST_H
+#define AST_POST_H
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct ast_device;
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+/* hardware fields */
+#define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004
+
+/* control commands */
+#define __AST_DRAMSTRUCT_UDELAY 0xff00
+#define __AST_DRAMSTRUCT_INVALID 0xffff
+
+#define __AST_DRAMSTRUCT_INDEX(_name) \
+ (__AST_DRAMSTRUCT_ ## _name)
+
+#define AST_DRAMSTRUCT_INIT(_name, _value) \
+ { __AST_DRAMSTRUCT_INDEX(_name), (_value) }
+
+#define AST_DRAMSTRUCT_UDELAY(_usecs) \
+ AST_DRAMSTRUCT_INIT(UDELAY, _usecs)
+#define AST_DRAMSTRUCT_INVALID \
+ AST_DRAMSTRUCT_INIT(INVALID, U32_MAX)
+
+#define AST_DRAMSTRUCT_IS(_entry, _name) \
+ ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name))
+
+u32 __ast_mindwm(void __iomem *regs, u32 r);
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v);
+
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl);
+bool mmc_test_burst(struct ast_device *ast, u32 datagen);
+
+/* ast_2000.c */
+void ast_2000_set_def_ext_reg(struct ast_device *ast);
+
+/* ast_2300.c */
+void ast_2300_set_def_ext_reg(struct ast_device *ast);
+
+#endif
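
The AST_DRAMSTRUCT_* helpers above encode DRAM init tables as (index, data) pairs, with the 0xff00 and 0xffff indices reserved for control commands. Below is a minimal sketch of a table built from these macros and of one plausible consumer loop; the register offsets, data values and the 0x1e6e0000 MMIO base are illustrative assumptions, not timings or code from the driver.

/* Hypothetical table: only the macro usage is real, all values are placeholders. */
static const struct ast_dramstruct example_dram_table[] = {
	AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),	/* expands to { 0x0004, 0x00000585 } */
	{ 0x0010, 0x00000000 },				/* plain register write */
	AST_DRAMSTRUCT_UDELAY(67),			/* control command: wait 67 us */
	AST_DRAMSTRUCT_INVALID,				/* terminator */
};

/* Hypothetical consumer; the register base used here is an assumption. */
static void example_dram_load_table(struct ast_device *ast,
				    const struct ast_dramstruct *entry)
{
	for (; !AST_DRAMSTRUCT_IS(entry, INVALID); entry++) {
		if (AST_DRAMSTRUCT_IS(entry, UDELAY))
			udelay(entry->data);
		else
			ast_moutdwm(ast, 0x1e6e0000 + entry->index, entry->data);
	}
}
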
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 2aadf07d135a..30578e3b07e4 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -29,14 +29,24 @@
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
+#define AST_IO_VGACR17_SYNC_ENABLE BIT(7) /* called "Hardware reset" in docs */
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
+#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+/* mirrors SCU100[7:0] */
+#define AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
+#define AST_IO_VGACRD0_VRAM_INIT_BY_BMC BIT(7)
+#define AST_IO_VGACRD0_VRAM_INIT_READY BIT(6)
+#define AST_IO_VGACRD0_IKVM_WIDESCREEN BIT(0)
+
#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
/* Display Transmitter Type */
#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1)
@@ -48,11 +58,16 @@
#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a
#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */
#define AST_IO_VGACRD1_TX_ASTDP 0x0e
+#define AST_IO_VGACRD1_SUPPORTS_WUXGA BIT(0)
+/*
+ * AST DisplayPort
+ */
#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
#define AST_IO_VGACRDF_HPD BIT(0)
#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */
+#define AST_IO_VGACRE0_24BPP BIT(5) /* 18 bpp, if unset */
#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0)
#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4)
#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
@@ -60,23 +75,4 @@
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-
-#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
-//#define AST_VRAM_INIT_BY_BMC BIT(7)
-//#define AST_VRAM_INIT_READY BIT(6)
-
-/*
- * AST DisplayPort
- */
-
-/*
- * ASTDP setmode registers:
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
-#define ASTDP_MISC0_24bpp BIT(5)
-#define ASTDP_MISC1 0
-#define ASTDP_AND_CLEAR_MASK 0x00
-
#endif
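
The new AST_IO_VGACRD0_* names cover the scratch-register field that the POST code tests as raw 0x80 and 0x40 masks. A short sketch of polling that field with the named bits, assuming ast_get_index_reg_mask() keeps its current signature and an `ast` device pointer is in scope inside a hypothetical void helper:

	u8 vgacrd0;

	/* Wait until scratch register CRD0 reports VRAM init done. */
	do {
		vgacrd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
	} while (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_READY));

	/* If the BMC already initialized VRAM, DRAM POST can be skipped. */
	if (vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)
		return;
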
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 0378c9bc079b..7da5b5c60f41 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -24,6 +24,8 @@
#ifndef AST_TABLES_H
#define AST_TABLES_H
+#include "ast_drv.h"
+
/* Std. Table Index Definition */
#define TextModeIndex 0
#define EGAModeIndex 1
@@ -31,114 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4
-#define Charx8Dot 0x00000001
-#define HalfDCLK 0x00000002
-#define DoubleScanMode 0x00000004
-#define LineCompareOff 0x00000008
-#define HBorder 0x00000020
-#define VBorder 0x00000010
-#define WideScreenMode 0x00000100
-#define NewModeInfo 0x00000200
-#define NHSync 0x00000400
-#define PHSync 0x00000800
-#define NVSync 0x00001000
-#define PVSync 0x00002000
-#define SyncPP (PVSync | PHSync)
-#define SyncPN (PVSync | NHSync)
-#define SyncNP (NVSync | PHSync)
-#define SyncNN (NVSync | NHSync)
-#define AST2500PreCatchCRT 0x00004000
-
-/* DCLK Index */
-#define VCLK25_175 0x00
-#define VCLK28_322 0x01
-#define VCLK31_5 0x02
-#define VCLK36 0x03
-#define VCLK40 0x04
-#define VCLK49_5 0x05
-#define VCLK50 0x06
-#define VCLK56_25 0x07
-#define VCLK65 0x08
-#define VCLK75 0x09
-#define VCLK78_75 0x0A
-#define VCLK94_5 0x0B
-#define VCLK108 0x0C
-#define VCLK135 0x0D
-#define VCLK157_5 0x0E
-#define VCLK162 0x0F
-/* #define VCLK193_25 0x10 */
-#define VCLK154 0x10
-#define VCLK83_5 0x11
-#define VCLK106_5 0x12
-#define VCLK146_25 0x13
-#define VCLK148_5 0x14
-#define VCLK71 0x15
-#define VCLK88_75 0x16
-#define VCLK119 0x17
-#define VCLK85_5 0x18
-#define VCLK97_75 0x19
-#define VCLK118_25 0x1A
-
-static const struct ast_vbios_dclk_info dclk_table[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x77, 0x58, 0x80}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
-};
-
-static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x58, 0x01, 0x42}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */
-};
-
static const struct ast_vbios_stdtable vbios_stdtable[] = {
/* MD_2_3_400 */
{
@@ -212,141 +106,4 @@ static const struct ast_vbios_stdtable vbios_stdtable[] = {
},
};
-static const struct ast_vbios_enhtable res_640x480[] = {
- { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
- { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
- { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
- (SyncNN | Charx8Dot) , 75, 3, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
- (SyncNN | Charx8Dot) , 85, 4, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
- (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
-};
-
-static const struct ast_vbios_enhtable res_800x600[] = {
- {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
- (SyncPP | Charx8Dot), 56, 1, 0x30 },
- {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 2, 0x30 },
- {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
- (SyncPP | Charx8Dot), 72, 3, 0x30 },
- {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 4, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 5, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
- (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
-};
-
-
-static const struct ast_vbios_enhtable res_1024x768[] = {
- {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
- (SyncNN | Charx8Dot), 60, 1, 0x31 },
- {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
- (SyncNN | Charx8Dot), 70, 2, 0x31 },
- {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 3, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 4, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
-};
-
-static const struct ast_vbios_enhtable res_1280x1024[] = {
- {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x32 },
- {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 2, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
- (SyncPP | Charx8Dot), 85, 3, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
-};
-
-static const struct ast_vbios_enhtable res_1600x1200[] = {
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x33 },
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
- (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
-};
-
-static const struct ast_vbios_enhtable res_1152x864[] = {
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
- (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
- (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
-};
-
-/* 16:9 */
-static const struct ast_vbios_enhtable res_1360x768[] = {
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x39 },
-};
-
-static const struct ast_vbios_enhtable res_1600x900[] = {
- {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A },
-};
-
-static const struct ast_vbios_enhtable res_1920x1080[] = {
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x38 },
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x38 },
-};
-
-
-/* 16:10 */
-static const struct ast_vbios_enhtable res_1280x800[] = {
- {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 },
-
-};
-
-static const struct ast_vbios_enhtable res_1440x900[] = {
- {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 },
-};
-
-static const struct ast_vbios_enhtable res_1680x1050[] = {
- {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 },
-};
-
-static const struct ast_vbios_enhtable res_1920x1200[] = {
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x34 },
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x34 },
-};
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_vbios.c b/drivers/gpu/drm/ast/ast_vbios.c
new file mode 100644
index 000000000000..0953e6dd3976
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ast_drv.h"
+#include "ast_vbios.h"
+
+/* 4:3 */
+
+static const struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2e },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2e },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75 Hz */
+ (SyncNN | Charx8Dot), 75, 3, 0x2e },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85 Hz */
+ (SyncNN | Charx8Dot), 85, 4, 0x2e },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_800x600[] = {
+ { 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56 Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ { 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ { 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72 Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ { 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1024x768[] = {
+ { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60 Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ { 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70 Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ { 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1152x864[] = {
+ { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75 Hz */
+ (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3b },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1280x1024[] = {
+ { 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ { 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x1200[] = {
+ { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:9 */
+
+static const struct ast_vbios_enhtable res_1360x768[] = {
+ { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x900[] = {
+ { 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60 Hz CVT RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x3a },
+ { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60 Hz CVT */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3a },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1080[] = {
+ { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x38 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:10 */
+
+static const struct ast_vbios_enhtable res_1280x800[] = {
+ { 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x35 },
+ { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1440x900[] = {
+ { 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x36 },
+ { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1680x1050[] = {
+ { 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x37 },
+ { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1200[] = {
+	{ 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x34 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/*
+ * VBIOS mode tables
+ */
+
+static const struct ast_vbios_enhtable *res_table_wuxga[] = {
+ &res_1920x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_fullhd[] = {
+ &res_1920x1080[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_wsxga_p[] = {
+ &res_1280x800[0],
+ &res_1360x768[0],
+ &res_1440x900[0],
+ &res_1600x900[0],
+ &res_1680x1050[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table[] = {
+ &res_640x480[0],
+ &res_800x600[0],
+ &res_1024x768[0],
+ &res_1152x864[0],
+ &res_1280x1024[0],
+ &res_1600x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *
+__ast_vbios_find_mode_table(const struct ast_vbios_enhtable **vmode_tables,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ while (*vmode_tables) {
+ if ((*vmode_tables)->hde == hdisplay && (*vmode_tables)->vde == vdisplay)
+ return *vmode_tables;
+ ++vmode_tables;
+ }
+
+ return NULL;
+}
+
+static const struct ast_vbios_enhtable *ast_vbios_find_mode_table(const struct ast_device *ast,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ const struct ast_vbios_enhtable *vmode_table = NULL;
+
+ if (ast->support_wuxga)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wuxga, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_fullhd)
+ vmode_table = __ast_vbios_find_mode_table(res_table_fullhd, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_wsxga_p)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wsxga_p, hdisplay, vdisplay);
+ if (!vmode_table)
+ vmode_table = __ast_vbios_find_mode_table(res_table, hdisplay, vdisplay);
+
+ return vmode_table;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode)
+{
+ const struct ast_vbios_enhtable *best_vmode = NULL;
+ const struct ast_vbios_enhtable *vmode_table;
+ const struct ast_vbios_enhtable *vmode;
+ u32 refresh_rate;
+
+ vmode_table = ast_vbios_find_mode_table(ast, mode->hdisplay, mode->vdisplay);
+ if (!vmode_table)
+ return NULL;
+
+ refresh_rate = drm_mode_vrefresh(mode);
+
+ for (vmode = vmode_table; ast_vbios_mode_is_valid(vmode); ++vmode) {
+ if (((mode->flags & DRM_MODE_FLAG_NVSYNC) && (vmode->flags & PVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PVSYNC) && (vmode->flags & NVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) && (vmode->flags & PHSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PHSYNC) && (vmode->flags & NHSync))) {
+ continue;
+ }
+ if (vmode->refresh_rate <= refresh_rate &&
+ (!best_vmode || vmode->refresh_rate > best_vmode->refresh_rate))
+ best_vmode = vmode;
+ }
+
+ return best_vmode;
+}
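
With these tables, ast_vbios_find_mode() above skips entries whose sync polarity conflicts with the requested mode and keeps the highest refresh rate that does not exceed the requested one. For example, assuming compatible sync flags, a 1024x768 mode requested at 72 Hz resolves to the table's 70 Hz entry, since both the 60 Hz and 70 Hz entries qualify and 70 Hz is the larger.
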
diff --git a/drivers/gpu/drm/ast/ast_vbios.h b/drivers/gpu/drm/ast/ast_vbios.h
new file mode 100644
index 000000000000..8cf025010594
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_VBIOS_H
+#define AST_VBIOS_H
+
+#include <linux/types.h>
+
+struct ast_device;
+struct drm_display_mode;
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+#define NewModeInfo 0x00000200
+#define NHSync 0x00000400
+#define PHSync 0x00000800
+#define NVSync 0x00001000
+#define PVSync 0x00002000
+#define SyncPP (PVSync | PHSync)
+#define SyncPN (PVSync | NHSync)
+#define SyncNP (NVSync | PHSync)
+#define SyncNN (NVSync | NHSync)
+#define AST2500PreCatchCRT 0x00004000
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0a
+#define VCLK94_5 0x0b
+#define VCLK108 0x0c
+#define VCLK135 0x0d
+#define VCLK157_5 0x0e
+#define VCLK162 0x0f
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+#define VCLK71 0x15
+#define VCLK88_75 0x16
+#define VCLK119 0x17
+#define VCLK85_5 0x18
+#define VCLK97_75 0x19
+#define VCLK118_25 0x1a
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+#define AST_VBIOS_INVALID_MODE \
+ {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}
+
+static inline bool ast_vbios_mode_is_valid(const struct ast_vbios_enhtable *vmode)
+{
+ return vmode->ht && vmode->vt && vmode->refresh_rate;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode);
+
+#endif
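
A minimal usage sketch of the new interface; the wrapper below is hypothetical, and only ast_vbios_find_mode() and the DRM mode-status codes are taken as given:

/* Hypothetical mode-validity helper built on the new lookup. */
static enum drm_mode_status example_mode_valid(struct ast_device *ast,
					       const struct drm_display_mode *mode)
{
	/* No VBIOS entry means the hardware has no matching timing set. */
	if (!ast_vbios_find_mode(ast, mode))
		return MODE_NOMODE;

	return MODE_OK;
}
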
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0f7ffb3ced20..e0efc7309b1b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_CM),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_SD,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_DISP),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_SYNC),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_PIXEL_CLK),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
pinctrl_pm_select_sleep_state(dev->dev);
@@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_PIXEL_CLK,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_SYNC,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_DISP,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
if (crtc->dc->desc->is_xlcdc) {
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_CM,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_SD),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index fa8ad94e431a..dd70894c8f38 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -724,19 +725,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
ret = atmel_hlcdc_create_outputs(dev);
if (ret) {
- dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
+ drm_err(dev, "failed to create HLCDC outputs: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_create_planes(dev);
if (ret) {
- dev_err(dev->dev, "failed to create planes: %d\n", ret);
+ drm_err(dev, "failed to create planes: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_crtc_create(dev);
if (ret) {
- dev_err(dev->dev, "failed to create crtc\n");
+ drm_err(dev, "failed to create crtc\n");
return ret;
}
@@ -778,7 +779,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
- dev_err(dev->dev, "failed to enable periph_clk\n");
+ drm_err(dev, "failed to enable periph_clk\n");
return ret;
}
@@ -786,13 +787,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ drm_err(dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ drm_err(dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
@@ -802,7 +803,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
+ drm_err(dev, "failed to install IRQ handler\n");
goto err_periph_clk_disable;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index e1a0bb24b511..53d47f01db0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops {
void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i);
- void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane);
+ void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc);
void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state);
void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 50fee6a93964..0b8a86afb096 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -15,6 +15,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
of_node_put(ep);
if (output->bus_fmt < 0) {
- dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
+ drm_err(dev, "endpoint %d: invalid bus width\n", endpoint);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 4a7ba0918eca..92132be9823f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -16,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "atmel_hlcdc_dc.h"
@@ -365,13 +366,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
xfactor);
/*
- * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration
- * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half
+ * With YCbCr 4:2:0 window resampling, configuration register
+ * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half
* the value of yfactor and xfactor.
+ *
+ * On the other hand, with YCbCr 4:2:2 window resampling, only the
+ * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value
+ * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no
+ * division by 2).
*/
- if (state->base.fb->format->format == DRM_FORMAT_YUV420) {
+ switch (state->base.fb->format->format) {
+ /* YCbCr 4:2:2 */
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_NV61:
+ xfactor /= 2;
+ break;
+
+ /* YCbCr 4:2:0 */
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_NV21:
yfactor /= 2;
xfactor /= 2;
+ break;
+ default:
+ break;
}
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2,
@@ -714,7 +736,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!hstate->base.crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc);
mode = &crtc_state->adjusted_mode;
ret = drm_atomic_helper_check_plane_state(s, crtc_state,
@@ -816,7 +838,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
@@ -832,7 +855,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR,
@@ -842,6 +866,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_XLCDC_LAYER_ENR, 0);
+ /*
+ * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN,
+ * (where xxx indicates each layer) requires writing one to the
+ * Update Attribute field for each layer in LCDC_ATTRE register for SAM9X7.
+ */
+ regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE |
+ ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE |
+ ATMEL_XLCDC_HEO_UPDATE);
+
/* Clear all pending interrupts */
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR);
}
@@ -852,7 +885,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
- dc->desc->ops->lcdc_atomic_disable(plane);
+ dc->desc->ops->lcdc_atomic_disable(plane, dc);
}
static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane,
@@ -1034,7 +1067,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) |
ATMEL_HLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1051,7 +1084,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) |
ATMEL_XLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1140,7 +1173,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
if (state) {
if (atmel_hlcdc_plane_alloc_dscrs(p, state)) {
kfree(state);
- dev_err(p->dev->dev,
+ drm_err(p->dev,
"Failed to allocate initial plane state\n");
return;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6b4664d91faa..a250afd8d662 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
tristate
depends on DRM_BRIDGE && OF
select AUXILIARY_BUS
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Simple transparent bridge that is used by several non-DRM drivers to
@@ -90,6 +91,15 @@ config DRM_FSL_LDB
help
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ select CEC_CORE if CEC_NOTIFIER
+ select DRM_KMS_HELPER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
config DRM_ITE_IT6263
tristate "ITE IT6263 LVDS/HDMI bridge"
depends on OF
@@ -110,8 +120,8 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select EXTCON
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
+ select REGMAP_I2C
help
ITE IT6505 DisplayPort bridge chip driver.
@@ -306,6 +316,19 @@ config DRM_SIMPLE_BRIDGE
Support for non-programmable DRM bridges, such as ADI ADV7123, TI
THS8134 and THS8135 or passive resistor ladder DACs.
+config DRM_SOLOMON_SSD2825
+ tristate "SSD2825 RGB/DSI bridge"
+ depends on SPI_MASTER && OF
+ select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ help
+ Say Y here if you want support for the Solomon SSD2825 RGB/DSI
+ SPI bridge driver.
+
+ Say M here if you want to support this hardware as a module.
+ The module will be named "ssd2825".
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -428,6 +451,18 @@ config DRM_TI_TPD12S015
Texas Instruments TPD12S015 HDMI level shifter and ESD protection
driver.
+config DRM_WAVESHARE_BRIDGE
+ tristate "Waveshare DSI bridge"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select REGMAP_I2C
+ help
+	  Driver for the Waveshare DSI to DPI bridge board.
+	  Please say Y if you have such hardware.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 97304b429a53..c7dc03182e59 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,10 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
+
obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
@@ -23,6 +27,7 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
+obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
@@ -36,6 +41,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_WAVESHARE_BRIDGE) += waveshare-dsi.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index f46a5e26b5dd..59a5256ce8a6 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -5,6 +5,9 @@ config DRM_I2C_ADV7511
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
@@ -19,7 +22,7 @@ config DRM_I2C_ADV7511_AUDIO
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
- select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
default y
help
When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ec0b7f3d889c..8be7266fd4f4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,13 +195,14 @@
#define ADV7511_I2S_IEC958_DIRECT 3
#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x)
#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x)
#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
@@ -313,16 +314,11 @@ enum adv7511_csc_scaling {
* @csc_enable: Whether to enable color space conversion
* @csc_scaling_factor: Color space conversion scaling factor
* @csc_coefficents: Color space conversion coefficents
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
*/
struct adv7511_video_config {
bool csc_enable;
enum adv7511_csc_scaling csc_scaling_factor;
const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
};
enum adv7511_type {
@@ -337,6 +333,7 @@ struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
+ const char *name;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
@@ -352,6 +349,7 @@ struct adv7511 {
struct i2c_client *i2c_cec;
struct regmap *regmap;
+ struct regmap *regmap_packet;
struct regmap *regmap_cec;
enum drm_connector_status status;
bool powered;
@@ -371,7 +369,7 @@ struct adv7511 {
struct work_struct hpd_work;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *cec_connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -389,9 +387,7 @@ struct adv7511 {
bool use_timing_gen;
const struct adv7511_chip_info *info;
- struct platform_device *audio_pdev;
- struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
@@ -399,20 +395,29 @@ struct adv7511 {
u32 cec_clk_freq;
};
+static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable);
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr);
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
-static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
-{
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
- return 0;
-}
+#define adv7511_cec_init NULL
+#define adv7511_cec_enable NULL
+#define adv7511_cec_log_addr NULL
+#define adv7511_cec_transmit NULL
#endif
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
@@ -421,16 +426,18 @@ int adv7533_attach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_audio_exit(struct adv7511 *adv7511);
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
-static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- return 0;
-}
-static inline void adv7511_audio_exit(struct adv7511 *adv7511)
-{
-}
+#define adv7511_hdmi_audio_startup NULL
+#define adv7511_hdmi_audio_shutdown NULL
+#define adv7511_hdmi_audio_prepare NULL
#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
#endif /* __DRM_I2C_ADV7511_H__ */
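
The bridge_to_adv7511() helper introduced above is the standard kernel container_of() idiom: a callback receives a pointer to the embedded struct drm_bridge and recovers the enclosing driver state by subtracting the member offset. A minimal, self-contained sketch of that idiom in plain C (hypothetical type names, with offsetof() standing in for the kernel macro):

#include <stddef.h>
#include <stdio.h>

struct bridge { int id; };                 /* stand-in for struct drm_bridge */

struct my_encoder {
	int powered;
	struct bridge bridge;              /* embedded, like adv7511->bridge */
};

/* Equivalent of container_of(b, struct my_encoder, bridge). */
static struct my_encoder *bridge_to_my_encoder(struct bridge *b)
{
	return (struct my_encoder *)((char *)b - offsetof(struct my_encoder, bridge));
}

int main(void)
{
	struct my_encoder enc = { .powered = 1, .bridge = { .id = 42 } };
	struct bridge *b = &enc.bridge;    /* what a drm_bridge callback is given */

	printf("powered=%d id=%d\n", bridge_to_my_encoder(b)->powered, b->id);
	return 0;
}

In the CEC and audio callbacks that follow, this is how the adv7511 state is reached from the drm_bridge pointer the core hands to each hook.
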
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 51fb9a574b4e..87e7e820810a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -12,6 +12,8 @@
#include <sound/soc.h>
#include <linux/of_graph.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include "adv7511.h"
static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
@@ -55,11 +57,12 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-static int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int audio_source, i2s_format = 0;
unsigned int invert_clock;
unsigned int rate;
@@ -153,14 +156,15 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- regmap_write(adv7511->regmap, 0x73, 0x1);
- return 0;
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
}
-static int audio_startup(struct device *dev, void *data)
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
@@ -177,15 +181,10 @@ static int audio_startup(struct device *dev, void *data)
/* not copyrighted */
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
BIT(5), BIT(5));
- /* enable audio infoframes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- BIT(3), BIT(3));
/* AV mute disable */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
- BIT(5), 0);
+
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
@@ -194,64 +193,14 @@ static int audio_startup(struct device *dev, void *data)
return 0;
}
-static void audio_shutdown(struct device *dev, void *data)
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
-}
-
-static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint,
- void *data)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops adv7511_codec_ops = {
- .hw_params = adv7511_hdmi_hw_params,
- .audio_shutdown = audio_shutdown,
- .audio_startup = audio_startup,
- .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
-};
-
-static const struct hdmi_codec_pdata codec_data = {
- .ops = &adv7511_codec_ops,
- .max_i2s_channels = 2,
- .i2s = 1,
- .spdif = 1,
-};
-
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- adv7511->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
-}
-
-void adv7511_audio_exit(struct adv7511 *adv7511)
-{
- if (adv7511->audio_pdev) {
- platform_device_unregister(adv7511->audio_pdev);
- adv7511->audio_pdev = NULL;
- }
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 2e9c88a2b5ed..8ecbc25dc647 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -12,6 +12,8 @@
#include <media/cec.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
@@ -44,8 +46,8 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
- cec_transmit_attempt_done(adv7511->cec_adap,
- CEC_TX_STATUS_ARB_LOST);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
@@ -72,12 +74,14 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
- cec_transmit_done(adv7511->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, err_cnt);
+ drm_connector_hdmi_cec_transmit_done(adv7511->cec_connector, status,
+ 0, nack_cnt, low_drive_cnt,
+ err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
- cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_OK);
return;
}
}
@@ -116,7 +120,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0);
- cec_received_msg(adv7511->cec_adap, &msg);
+ drm_connector_hdmi_cec_received_msg(adv7511->cec_connector, &msg);
}
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
@@ -179,9 +183,9 @@ int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
return IRQ_HANDLED;
}
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
@@ -225,9 +229,9 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
return 0;
}
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -293,10 +297,10 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
return 0;
}
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@@ -328,12 +332,6 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
@@ -348,20 +346,18 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+ struct device *dev = &adv7511->i2c_main->dev;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
goto err_cec_parse_dt;
- adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap)) {
- ret = PTR_ERR(adv7511->cec_adap);
- goto err_cec_alloc;
- }
+ adv7511->cec_connector = connector;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
@@ -378,17 +374,8 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
- ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret)
- goto err_cec_register;
return 0;
-err_cec_register:
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
-err_cec_alloc:
- dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
- ret);
err_cec_parse_dt:
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index eb5919b38263..b9be86541307 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -12,14 +12,17 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <media/cec.h>
+#include <sound/pcm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "adv7511.h"
@@ -129,6 +132,13 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7511_packet_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -203,62 +213,37 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb, bool hdmi_mode)
+ bool rgb)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
- uint8_t infoframe[17];
-
- config.hdmi_mode = hdmi_mode;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
if (rgb) {
config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ output_format_422 = false;
+ output_format_ycbcr = false;
} else {
config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
DRM_COLOR_FORMAT_YCBCR422) &&
- config.hdmi_mode) {
+ connector->display_info.is_hdmi) {
config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
output_format_422 = true;
output_format_ycbcr = true;
- break;
- default:
+ } else {
+ config.csc_enable = true;
output_format_422 = false;
output_format_ycbcr = false;
- break;
}
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
}
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ if (connector->display_info.is_hdmi)
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+ else
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
adv7511_set_colormap(adv7511, config.csc_enable,
config.csc_coefficents,
@@ -269,15 +254,6 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}
static void adv7511_set_link_config(struct adv7511 *adv7511,
@@ -446,22 +422,16 @@ static void adv7511_hpd_work(struct work_struct *work)
* restore its state.
*/
if (status == connector_status_connected &&
- adv7511->connector.status == connector_status_disconnected &&
+ adv7511->status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
- if (adv7511->connector.status != status) {
- adv7511->connector.status = status;
+ if (adv7511->status != status) {
+ adv7511->status = status;
- if (adv7511->connector.dev) {
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv7511->cec_adap);
- drm_kms_helper_hotplug_event(adv7511->connector.dev);
- } else {
- drm_bridge_hpd_notify(&adv7511->bridge, status);
- }
+ drm_bridge_hpd_notify(&adv7511->bridge, status);
}
}
@@ -636,45 +606,11 @@ static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- if (drm_edid) {
- /*
- * FIXME: The CEC physical address should be set using
- * cec_s_phys_addr(adap,
- * connector->display_info.source_physical_address, false) from
- * a path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
-
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
- } else {
- cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
- }
-
return drm_edid;
}
-static int adv7511_get_modes(struct adv7511 *adv7511,
- struct drm_connector *connector)
-{
- const struct drm_edid *drm_edid;
- unsigned int count;
-
- drm_edid = adv7511_edid_read(adv7511, connector);
-
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
-
- drm_edid_free(drm_edid);
-
- return count;
-}
-
static enum drm_connector_status
-adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511)
{
enum drm_connector_status status;
unsigned int val;
@@ -699,8 +635,6 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- if (connector)
- adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -719,17 +653,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
return status;
}
-static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- const struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static void adv7511_mode_set(struct adv7511 *adv7511,
- const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
@@ -800,11 +724,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (drm_mode_vrefresh(mode) <= 24)
+ if (drm_mode_vrefresh(adj_mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (drm_mode_vrefresh(mode) <= 25)
+ else if (drm_mode_vrefresh(adj_mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (drm_mode_vrefresh(mode) <= 30)
+ else if (drm_mode_vrefresh(adj_mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
@@ -821,82 +745,30 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ /* Update horizontal/vertical porch params */
+ if (adv7511->info->has_dsi && adv7511->use_timing_gen)
+ adv7533_dsi_config_timing_gen(adv7511);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
*/
- adv7511->f_tmds = mode->clock;
-}
-
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
-{
- return container_of(connector, struct adv7511, connector);
+ adv7511->f_tmds = adj_mode->clock;
}
-static int adv7511_connector_get_modes(struct drm_connector *connector)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_get_modes(adv, connector);
-}
-
-static enum drm_mode_status
-adv7511_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_mode_valid(adv, mode);
-}
-
-static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
- .get_modes = adv7511_connector_get_modes,
- .mode_valid = adv7511_connector_mode_valid,
-};
-
-static enum drm_connector_status
-adv7511_connector_detect(struct drm_connector *connector, bool force)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_detect(adv, connector);
-}
-
-static const struct drm_connector_funcs adv7511_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = adv7511_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int adv7511_connector_init(struct adv7511 *adv)
{
struct drm_bridge *bridge = &adv->bridge;
- int ret;
+ struct drm_connector *connector;
- if (adv->i2c_main->irq)
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
- else
- adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- ret = drm_connector_init(bridge->dev, &adv->connector,
- &adv7511_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
+ return PTR_ERR(connector);
}
- drm_connector_helper_add(&adv->connector,
- &adv7511_connector_helper_funcs);
- drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
return 0;
}
@@ -905,32 +777,59 @@ static int adv7511_connector_init(struct adv7511 *adv)
* DRM Bridge Operations
*/
-static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+static const struct adv7511 *bridge_to_adv7511_const(const struct drm_bridge *bridge)
{
return container_of(bridge, struct adv7511, bridge);
}
-static void adv7511_bridge_enable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
adv7511_power_on(adv);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ adv7511_set_config_csc(adv, connector, adv->rgb);
+
+ adv7511_mode_set(adv, &crtc_state->adjusted_mode);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
-static void adv7511_bridge_disable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
adv7511_power_off(adv);
}
-static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
+static enum drm_mode_status
+adv7511_bridge_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ const struct adv7511 *adv = bridge_to_adv7511_const(bridge);
+
+ if (tmds_rate > 1000ULL * adv->info->max_mode_clock_khz)
+ return MODE_CLOCK_HIGH;
- adv7511_mode_set(adv, mode, adj_mode);
+ return MODE_OK;
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
@@ -939,20 +838,21 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- if (adv->info->has_dsi)
- return adv7533_mode_valid(adv, mode);
- else
- return adv7511_mode_valid(adv, mode);
+ if (!adv->info->has_dsi)
+ return MODE_OK;
+
+ return adv7533_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -971,11 +871,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
return ret;
}
-static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_detect(adv, NULL);
+ return adv7511_detect(adv);
}
static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
@@ -986,24 +887,116 @@ static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge
return adv7511_edid_read(adv, connector);
}
-static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
- enum drm_connector_status status)
+static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
+}
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv->cec_adap);
+static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ /* The Audio infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* send current AVI infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), BIT(6));
+
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_LENGTH, 0x2);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(1), 0x1);
+
+ /* use AVI infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), 0);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .enable = adv7511_bridge_enable,
- .disable = adv7511_bridge_disable,
- .mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
- .hpd_notify = adv7511_bridge_hpd_notify,
+
+ .atomic_enable = adv7511_bridge_atomic_enable,
+ .atomic_disable = adv7511_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+
+ .hdmi_tmds_char_rate_valid = adv7511_bridge_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = adv7511_bridge_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = adv7511_bridge_hdmi_write_infoframe,
+
+ .hdmi_audio_startup = adv7511_hdmi_audio_startup,
+ .hdmi_audio_prepare = adv7511_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = adv7511_hdmi_audio_shutdown,
+
+ .hdmi_cec_init = adv7511_cec_init,
+ .hdmi_cec_enable = adv7511_cec_enable,
+ .hdmi_cec_log_addr = adv7511_cec_log_addr,
+ .hdmi_cec_transmit = adv7511_cec_transmit,
};
/* -----------------------------------------------------------------------------
@@ -1217,9 +1210,10 @@ static int adv7511_probe(struct i2c_client *i2c)
if (!dev->of_node)
return -EINVAL;
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
+ adv7511 = devm_drm_bridge_alloc(dev, struct adv7511, bridge,
+ &adv7511_bridge_funcs);
+ if (IS_ERR(adv7511))
+ return PTR_ERR(adv7511);
adv7511->i2c_main = i2c;
adv7511->powered = false;
@@ -1241,8 +1235,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
- if (ret)
- return dev_err_probe(dev, ret, "failed to init regulators\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init regulators\n");
+ goto err_of_node_put;
+ }
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1298,6 +1294,13 @@ static int adv7511_probe(struct i2c_client *i2c)
goto err_i2c_unregister_edid;
}
+ adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet,
+ &adv7511_packet_config);
+ if (IS_ERR(adv7511->regmap_packet)) {
+ ret = PTR_ERR(adv7511->regmap_packet);
+ goto err_i2c_unregister_packet;
+ }
+
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
adv7511->i2c_packet->addr << 1);
@@ -1314,22 +1317,43 @@ static int adv7511_probe(struct i2c_client *i2c)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
- ret = adv7511_cec_init(dev, adv7511);
- if (ret)
- goto err_unregister_cec;
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
- adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI;
if (adv7511->i2c_main->irq)
adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.vendor = "Analog";
+ adv7511->bridge.product = adv7511->info->name;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO;
+ adv7511->bridge.hdmi_audio_dev = dev;
+ adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2;
+ adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
+ adv7511->bridge.hdmi_audio_spdif_playback = 1;
+ adv7511->bridge.hdmi_audio_dai_port = 2;
+#endif
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ adv7511->bridge.hdmi_cec_dev = dev;
+ adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev);
+ adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+#endif
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
- adv7511_audio_init(dev, adv7511);
-
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1351,10 +1375,7 @@ static int adv7511_probe(struct i2c_client *i2c)
return 0;
err_unregister_audio:
- adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
-err_unregister_cec:
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
@@ -1363,6 +1384,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+ of_node_put(adv7511->host_node);
return ret;
}
@@ -1371,13 +1394,12 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ of_node_put(adv7511->host_node);
+
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
- adv7511_audio_exit(adv7511);
-
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1387,6 +1409,8 @@ static void adv7511_remove(struct i2c_client *i2c)
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
+ .name = "ADV7511",
+ .max_mode_clock_khz = 165000,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
@@ -1394,6 +1418,7 @@ static const struct adv7511_chip_info adv7511_chip_info = {
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
+ .name = "ADV7533",
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
@@ -1404,6 +1429,7 @@ static const struct adv7511_chip_info adv7533_chip_info = {
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
+ .name = "ADV7535",
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
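
This probe, like the Analogix ones later in the diff, moves from devm_kzalloc() plus a manual bridge.funcs assignment to devm_drm_bridge_alloc(), which (as the conversions here imply) allocates the containing structure, initialises the embedded drm_bridge with the given funcs table, and ties its lifetime to the device. A condensed sketch of the pattern, using a hypothetical my_bridge driver with hardware setup elided:

#include <linux/err.h>
#include <linux/i2c.h>
#include <drm/drm_bridge.h>

struct my_bridge {
	struct i2c_client *i2c;
	struct drm_bridge bridge;	/* member name passed to the macro */
};

static const struct drm_bridge_funcs my_bridge_funcs = {
	/* .attach, .atomic_enable, .atomic_disable, ... */
};

static int my_bridge_probe(struct i2c_client *i2c)
{
	struct my_bridge *mb;

	/* Allocates *mb and sets mb->bridge.funcs = &my_bridge_funcs. */
	mb = devm_drm_bridge_alloc(&i2c->dev, struct my_bridge, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	mb->i2c = i2c;
	/* ... regmaps, GPIOs, interrupts ... */

	drm_bridge_add(&mb->bridge);
	return 0;
}

Because the funcs table is fixed at allocation time, the explicit "adv7511->bridge.funcs = &adv7511_bridge_funcs;" assignment disappears from the probe path above.
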
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 4481489aaf5e..188c1093a66e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -24,7 +24,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
@@ -67,9 +67,6 @@ void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
- if (adv->use_timing_gen)
- adv7511_dsi_config_timing_gen(adv);
-
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
@@ -106,10 +103,6 @@ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- /* Check max clock for either 7533 or 7535 */
- if (mode->clock > adv->info->max_mode_clock_khz)
- return MODE_CLOCK_HIGH;
-
/* Check max clock for each lane */
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
@@ -172,7 +165,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
- if (num_lanes < 1 || num_lanes > 4)
+ if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@@ -181,8 +174,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
- of_node_put(adv->host_node);
-
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f3fe47b12edc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
struct device *dev;
int i, err;
- anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
- if (!anx6345)
- return -ENOMEM;
+ anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
+ &anx6345_bridge_funcs);
+ if (IS_ERR(anx6345))
+ return PTR_ERR(anx6345);
mutex_init(&anx6345->lock);
@@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
/* Look for supported chip ID */
anx6345_poweron(anx6345);
if (anx6345_get_chip_id(anx6345)) {
- anx6345->bridge.funcs = &anx6345_bridge_funcs;
drm_bridge_add(&anx6345->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..ba0fc149a9e7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -1220,9 +1193,10 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
bool found = false;
int err;
- anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
- if (!anx78xx)
- return -ENOMEM;
+ anx78xx = devm_drm_bridge_alloc(&client->dev, struct anx78xx, bridge,
+ &anx78xx_bridge_funcs);
+ if (IS_ERR(anx78xx))
+ return PTR_ERR(anx78xx);
pdata = &anx78xx->pdata;
@@ -1333,8 +1307,6 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
goto err_poweroff;
}
- anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
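
Both Analogix drivers above replace their open-coded sink power-up with a single drm_dp_link_power_up(&aux, dpcd[DP_DPCD_REV]) call. For DPCD 1.1 and later sinks the helper covers essentially the sequence the removed lines spelled out; a condensed sketch of that sequence, reconstructed from the deleted code (error messages elided):

#include <linux/delay.h>
#include <drm/display/drm_dp_helper.h>

/* What the deleted blocks did by hand before the helper took over. */
static int sink_power_up(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	u8 val;
	int err;

	/* DP_SET_POWER only exists on DPCD v1.1 and later. */
	if (dpcd_rev < 0x11)
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &val);
	if (err < 0)
		return err;

	val &= ~DP_SET_POWER_MASK;
	val |= DP_SET_POWER_D0;

	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, val);
	if (err < 0)
		return err;

	/* Per DP 1.1 (Section 2.5.3.1), the sink must exit power saving within 1 ms. */
	usleep_range(1000, 2000);
	return 0;
}

Centralising this in the DP helper library removes two nearly identical copies and keeps the timing requirement in one place.
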
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index b1e482994ffe..e8662168717d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -5,6 +5,8 @@
* Based on anx7808 driver obtained from chromeos with copyright:
* Copyright(c) 2013, Google Inc.
*/
+
+#include <linux/export.h>
#include <linux/regmap.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bfa88409a7ff..efe534977d12 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -838,10 +839,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
int ret;
/* Keep the panel disabled while we configure video */
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
+ drm_panel_disable(dp->plat_data->panel);
ret = analogix_dp_train_link(dp);
if (ret) {
@@ -863,13 +861,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
/* Safe to enable the panel now */
- if (dp->plat_data->panel) {
- ret = drm_panel_enable(dp->plat_data->panel);
- if (ret) {
- DRM_ERROR("failed to enable the panel\n");
- return ret;
- }
- }
+ drm_panel_enable(dp->plat_data->panel);
/* Check whether panel supports fast training */
ret = analogix_dp_fast_link_train_detection(dp);
@@ -955,67 +947,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
-/*
- * This function is a bit of a catch-all for panel preparation, hopefully
- * simplifying the logic of functions that need to prepare/unprepare the panel
- * below.
- *
- * If @prepare is true, this function will prepare the panel. Conversely, if it
- * is false, the panel will be unprepared.
- *
- * If @is_modeset_prepare is true, the function will disregard the current state
- * of the panel and either prepare/unprepare the panel based on @prepare. Once
- * it finishes, it will update dp->panel_is_modeset to reflect the current state
- * of the panel.
- */
-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
- bool prepare, bool is_modeset_prepare)
-{
- int ret = 0;
-
- if (!dp->plat_data->panel)
- return 0;
-
- mutex_lock(&dp->panel_lock);
-
- /*
- * Exit early if this is a temporary prepare/unprepare and we're already
- * modeset (since we neither want to prepare twice or unprepare early).
- */
- if (dp->panel_is_modeset && !is_modeset_prepare)
- goto out;
-
- if (prepare)
- ret = drm_panel_prepare(dp->plat_data->panel);
- else
- ret = drm_panel_unprepare(dp->plat_data->panel);
-
- if (ret)
- goto out;
-
- if (is_modeset_prepare)
- dp->panel_is_modeset = prepare;
-
-out:
- mutex_unlock(&dp->panel_lock);
- return ret;
-}
-
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
const struct drm_edid *drm_edid;
- int ret, num_modes = 0;
+ int num_modes = 0;
if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return 0;
- }
-
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
@@ -1024,10 +964,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
-
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
}
if (dp->plat_data->get_modes)
@@ -1082,24 +1018,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
enum drm_connector_status status = connector_status_disconnected;
- int ret;
if (dp->plat_data->panel)
return connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return connector_status_disconnected;
- }
-
if (!analogix_dp_detect_hpd(dp))
status = connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
return status;
}
@@ -1113,10 +1038,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1197,15 +1122,12 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void
-analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
@@ -1216,9 +1138,7 @@ analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = analogix_dp_prepare_panel(dp, true, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_prepare(dp->plat_data->panel);
}
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
@@ -1257,12 +1177,10 @@ out_dp_init:
return ret;
}
-static void
-analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
@@ -1299,18 +1217,12 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
- struct analogix_dp_device *dp = bridge->driver_private;
- int ret;
+ struct analogix_dp_device *dp = to_dp(bridge);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
+ drm_panel_disable(dp->plat_data->panel);
disable_irq(dp->irq);
@@ -1318,21 +1230,17 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
- ret = analogix_dp_prepare_panel(dp, false, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_unprepare(dp->plat_data->panel);
dp->fast_train_enable = false;
dp->psr_supported = false;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void
-analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -1367,12 +1275,10 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static void
-analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
@@ -1394,7 +1300,7 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
@@ -1479,25 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.attach = analogix_dp_bridge_attach,
};
-static int analogix_dp_create_bridge(struct drm_device *drm_dev,
- struct analogix_dp_device *dp)
-{
- struct drm_bridge *bridge;
-
- bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("failed to allocate for drm bridge\n");
- return -ENOMEM;
- }
-
- dp->bridge = bridge;
-
- bridge->driver_private = dp;
- bridge->funcs = &analogix_dp_bridge_funcs;
-
- return drm_bridge_attach(dp->encoder, bridge, NULL, 0);
-}
-
static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
{
struct device_node *dp_node = dp->dev->of_node;
@@ -1513,6 +1400,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
video_info->max_link_rate = 0x0A;
video_info->max_lane_count = 0x04;
break;
+ case RK3588_EDP:
+ video_info->max_link_rate = 0x14;
+ video_info->max_lane_count = 0x04;
+ break;
case EXYNOS_DP:
/*
* NOTE: those property parseing code is used for
@@ -1548,12 +1439,31 @@ out:
return ret;
}
+static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+ int val;
+ int ret;
+
+ if (dp->force_hpd)
+ return 0;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
+ wait_us / 100, wait_us);
+
+ pm_runtime_mark_last_busy(dp->dev);
+ pm_runtime_put_autosuspend(dp->dev);
+
+ return ret;
+}
+
struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
- struct resource *res;
unsigned int irq_flags;
int ret;
@@ -1562,16 +1472,14 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_PTR(-EINVAL);
}
- dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
- if (!dp)
- return ERR_PTR(-ENOMEM);
+ dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
+ &analogix_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
- mutex_init(&dp->panel_lock);
- dp->panel_is_modeset = false;
-
/*
* platform dp driver need containor_of the plat_data to get
* the driver private data, so we need to store the point of
@@ -1605,13 +1513,9 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_CAST(dp->clock);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dp->reg_base)) {
- ret = PTR_ERR(dp->reg_base);
- goto err_disable_clk;
- }
+ dp->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dp->reg_base))
+ return ERR_CAST(dp->reg_base);
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1623,8 +1527,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- ret = PTR_ERR(dp->hpd_gpiod);
- goto err_disable_clk;
+ return ERR_CAST(dp->hpd_gpiod);
}
if (dp->hpd_gpiod) {
@@ -1636,16 +1539,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
* that we can get the current state of the GPIO.
*/
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
} else {
dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
+ irq_flags = IRQF_NO_AUTOEN;
}
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- ret = -ENODEV;
- goto err_disable_clk;
+ return ERR_PTR(-ENODEV);
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1654,15 +1556,22 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_clk;
+ return ERR_PTR(ret);
}
- disable_irq(dp->irq);
- return dp;
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
+ dp->aux.dev = dp->dev;
+ drm_dp_aux_init(&dp->aux);
-err_disable_clk:
- clk_disable_unprepare(dp->clock);
- return ERR_PTR(ret);
+ pm_runtime_use_autosuspend(dp->dev);
+ pm_runtime_set_autosuspend_delay(dp->dev, 100);
+ ret = devm_pm_runtime_enable(dp->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dp;
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
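
The new wait_hpd_asserted hook above polls the sink's plug-in status with readx_poll_timeout() while holding a runtime-PM reference. The macro simply re-reads a getter until the condition holds or the timeout expires; a minimal sketch with a made-up device structure, register offset and status getter (none of these names exist in the driver):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_STATUS_REG	0x40	/* hypothetical register offset */
#define MY_STATUS_HPD	BIT(0)	/* hypothetical hot-plug bit */

struct my_dp {
	void __iomem *regs;
};

/* Hypothetical getter: returns 0 once the sink reports hot-plug. */
static int my_get_unplugged(struct my_dp *dp)
{
	return !(readl(dp->regs + MY_STATUS_REG) & MY_STATUS_HPD);
}

static int my_wait_hpd(struct my_dp *dp, unsigned long wait_us)
{
	int val;

	/* Re-check every wait_us/100 microseconds, give up after wait_us. */
	return readx_poll_timeout(my_get_unplugged, dp, val, !val,
				  wait_us / 100, wait_us);
}
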
@@ -1692,6 +1601,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
if (dp->plat_data->power_on)
dp->plat_data->power_on(dp->plat_data);
+ phy_set_mode(dp->phy, PHY_MODE_DP);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1707,28 +1617,15 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_use_autosuspend(dp->dev);
- pm_runtime_set_autosuspend_delay(dp->dev, 100);
- pm_runtime_enable(dp->dev);
- } else {
- ret = analogix_dp_resume(dp);
- if (ret)
- return ret;
- }
-
- dp->aux.name = "DP-AUX";
- dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = dp->dev;
dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret) {
DRM_ERROR("failed to register AUX (%d)\n", ret);
- goto err_disable_pm_runtime;
+ return ret;
}
- ret = analogix_dp_create_bridge(drm_dev, dp);
+ ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
goto err_unregister_aux;
@@ -1738,13 +1635,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_unregister_aux:
drm_dp_aux_unregister(&dp->aux);
-err_disable_pm_runtime:
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
return ret;
}
@@ -1752,22 +1642,12 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind);
void analogix_dp_unbind(struct analogix_dp_device *dp)
{
- analogix_dp_bridge_disable(dp->bridge);
+ analogix_dp_bridge_disable(&dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
+ drm_panel_unprepare(dp->plat_data->panel);
drm_dp_aux_unregister(&dp->aux);
-
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
@@ -1793,6 +1673,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return dp->plat_data;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
+
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
+{
+ return &dp->aux;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
+
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Analogix DP Core Driver");
MODULE_LICENSE("GPL v2");
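
Several hunks in this series follow the same conversion: the driver-private structure now embeds a struct drm_bridge, and devm_drm_bridge_alloc() allocates the whole structure with the bridge funcs already wired up, replacing devm_kzalloc() plus a manual funcs assignment. A minimal sketch of the pattern, using a hypothetical my_bridge driver rather than any of the drivers touched here:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct my_bridge {
	struct drm_bridge bridge;	/* embedded, no longer a pointer */
	void __iomem *regs;		/* driver-private data */
};

static const struct drm_bridge_funcs my_bridge_funcs;

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mb;

	/* Allocates struct my_bridge and sets mb->bridge.funcs in one step. */
	mb = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	mb->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&mb->bridge);

	return 0;
}
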
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 774d11574b09..b86e93f30ed6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -11,6 +11,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_bridge.h>
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
@@ -154,7 +155,7 @@ struct analogix_dp_device {
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
@@ -169,9 +170,6 @@ struct analogix_dp_device {
bool fast_train_enable;
bool psr_supported;
- struct mutex panel_lock;
- bool panel_is_modeset;
-
struct analogix_dp_plat_data *plat_data;
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 3afc73c858c4..38fd8d5014d2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <drm/bridge/analogix_dp.h>
@@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
+ int ret;
reg = bwtype;
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.link_rate =
+ drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
+ phy_cfg.dp.set_rate = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
@@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
{
u32 reg;
+ int ret;
reg = count;
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.lanes = dp->link_train.lane_count;
+ phy_cfg.dp.set_lanes = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
@@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
+ int ret;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ for (lane = 0; lane < dp->link_train.lane_count; lane++) {
+ u8 training_lane = dp->link_train.training_lane[lane];
+ u8 vs, pe;
+
+ vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ phy_cfg.dp.voltage[lane] = vs;
+ phy_cfg.dp.pre[lane] = pe;
+ }
+
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
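
The three hunks above all forward the negotiated link parameters to a downstream PHY through phy_configure(), tolerating PHYs that do not implement DisplayPort configuration. A condensed sketch of that pattern, assuming a generic DP-capable PHY handle and at most four lanes, rather than the analogix register writes that surround it:

#include <linux/phy/phy.h>
#include <drm/display/drm_dp_helper.h>

static int my_dp_phy_configure(struct phy *phy, u8 bw_code, unsigned int lanes,
			       const u8 *vs, const u8 *pe)
{
	union phy_configure_opts opts = { 0 };
	unsigned int i;
	int ret;

	/* link_rate is expressed in units of 100 kHz. */
	opts.dp.link_rate = drm_dp_bw_code_to_link_rate(bw_code) / 100;
	opts.dp.lanes = lanes;
	for (i = 0; i < lanes; i++) {
		opts.dp.voltage[i] = vs[i];	/* voltage swing level */
		opts.dp.pre[i] = pe[i];		/* pre-emphasis level */
	}
	opts.dp.set_rate = true;
	opts.dp.set_lanes = true;
	opts.dp.set_voltages = true;

	/* PHYs without DP support return -EOPNOTSUPP; treat that as success. */
	ret = phy_configure(phy, &opts);
	return (ret == -EOPNOTSUPP) ? 0 : ret;
}
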
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 4be34d5c7a3b..6f3fdcb6afdb 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1257,10 +1257,10 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(11000, 12000);
/* Power on pin enable */
- gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
/* Power reset pin enable */
- gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 1);
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
@@ -1280,9 +1280,9 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
return;
}
- gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 0);
usleep_range(1000, 1100);
- gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
@@ -1814,9 +1814,6 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- if (ctx->pdata.panel_bridge)
- return connector_status_connected;
-
return ctx->hpd_status ? connector_status_connected :
connector_status_disconnected;
}
@@ -2141,6 +2138,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2157,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2380,7 +2378,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2389,7 +2387,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic enable\n");
- connector = drm_atomic_get_new_connector_for_encoder(state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector)
return;
@@ -2401,7 +2399,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
anx7625_dp_start(ctx);
- conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
@@ -2419,7 +2417,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2450,7 +2448,7 @@ anx7625_audio_update_connector_status(struct anx7625_data *ctx,
enum drm_connector_status status);
static enum drm_connector_status
-anx7625_bridge_detect(struct drm_bridge *bridge)
+anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2474,6 +2472,22 @@ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge
return anx7625_edid_read(ctx);
}
+static void anx7625_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_get_sync(dev);
+}
+
+static void anx7625_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_put_sync(dev);
+}
+
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
.detach = anx7625_bridge_detach,
@@ -2487,6 +2501,8 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.edid_read = anx7625_bridge_edid_read,
+ .hpd_enable = anx7625_bridge_hpd_enable,
+ .hpd_disable = anx7625_bridge_hpd_disable,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
@@ -2568,12 +2584,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
-static void anx7625_runtime_disable(void *data)
-{
- pm_runtime_dont_use_autosuspend(data);
- pm_runtime_disable(data);
-}
-
static int anx7625_link_bridge(struct drm_dp_aux *aux)
{
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
@@ -2586,16 +2596,15 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
return ret;
}
- platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
- if (!platform->pdata.panel_bridge)
- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
- DRM_BRIDGE_OP_DETECT;
+ if (!platform->pdata.panel_bridge || !anx7625_of_panel_on_aux_bus(dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT;
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
+ platform->bridge.support_hdcp = true;
drm_bridge_add(&platform->bridge);
@@ -2621,10 +2630,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return -ENODEV;
}
- platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
- if (!platform) {
+ platform = devm_drm_bridge_alloc(dev, struct anx7625_data, bridge, &anx7625_bridge_funcs);
+ if (IS_ERR(platform)) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
- return -ENOMEM;
+ return PTR_ERR(platform);
}
pdata = &platform->pdata;
@@ -2669,7 +2678,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2707,11 +2716,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
goto free_wq;
}
- pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
- ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
goto free_wq;
@@ -2739,8 +2747,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
@@ -2771,7 +2781,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..b3e4cdff61d6 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -17,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@@ -64,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
@@ -86,6 +89,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +99,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -108,9 +112,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev, struct drm_aux_bridge_data,
+ bridge, &drm_aux_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
@@ -118,7 +123,6 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
"failed to acquire drm_bridge\n");
- data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
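
The attach callbacks throughout this series gain an explicit struct drm_encoder parameter and are expected to forward it to drm_bridge_attach() rather than dereference bridge->encoder. A minimal pass-through attach, sketched with hypothetical names:

#include <linux/container_of.h>
#include <drm/drm_bridge.h>

struct my_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);

	/* This bridge creates no connector of its own. */
	return drm_bridge_attach(encoder, mb->next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
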
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..2e9c702c7087 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -64,10 +65,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str
adev->id = ret;
adev->name = "dp_hpd_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_hpd_bridge_release;
adev->dev.platform_data = of_node_get(np);
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
of_node_put(adev->dev.platform_data);
@@ -156,6 +158,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
@@ -170,12 +173,13 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_hpd_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev,
+ struct drm_aux_hpd_bridge_data, bridge,
+ &drm_aux_hpd_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
- data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
data->bridge.of_node = dev_get_platdata(data->dev);
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index cced81633ddc..f1d8a8a151d8 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -6,6 +6,7 @@ config DRM_CDNS_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
+ select VIDEOMODE_HELPERS
depends on OF
help
Support Cadence DPI to DSI bridge. This is an internal
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..09b289f0fcbf 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -9,6 +9,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
+#include <video/videomode.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -417,7 +418,8 @@
#define DSI_OUTPUT_PORT 0
#define DSI_INPUT_PORT(inputid) (1 + (inputid))
-#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HBP_FRAME_PULSE_OVERHEAD 12
+#define DSI_HBP_FRAME_EVENT_OVERHEAD 16
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
@@ -425,6 +427,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -441,15 +454,6 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
return container_of(bridge, struct cdns_dsi_input, bridge);
}
-static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- if (mode_valid_check)
- return mode->hsync_start - mode->hdisplay;
-
- return mode->crtc_hsync_start - mode->crtc_hdisplay;
-}
-
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
unsigned int dpi_bpp,
unsigned int dsi_pkt_overhead)
@@ -465,120 +469,70 @@ static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
}
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
- unsigned int tmp;
- bool sync_pulse = false;
+ u32 dpi_hsa, dpi_hbp, dpi_hfp, dpi_hact;
+ bool sync_pulse;
int bpp;
+ dpi_hsa = vm->hsync_len;
+ dpi_hbp = vm->hback_porch;
+ dpi_hfp = vm->hfront_porch;
+ dpi_hact = vm->hactive;
+
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- sync_pulse = true;
+ sync_pulse = output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- if (mode_valid_check)
- tmp = mode->htotal -
- (sync_pulse ? mode->hsync_end : mode->hsync_start);
- else
- tmp = mode->crtc_htotal -
- (sync_pulse ?
- mode->crtc_hsync_end : mode->crtc_hsync_start);
-
- dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
-
if (sync_pulse) {
- if (mode_valid_check)
- tmp = mode->hsync_end - mode->hsync_start;
- else
- tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp, bpp,
+ DSI_HBP_FRAME_PULSE_OVERHEAD);
- dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+ dsi_cfg->hsa = dpi_to_dsi_timing(dpi_hsa, bpp,
DSI_HSA_FRAME_OVERHEAD);
- }
-
- dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
- mode->hdisplay : mode->crtc_hdisplay,
- bpp, 0);
- dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check),
- bpp, DSI_HFP_FRAME_OVERHEAD);
-
- return 0;
-}
-
-static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi,
- struct cdns_dsi_cfg *dsi_cfg,
- struct phy_configure_opts_mipi_dphy *phy_cfg,
- const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- struct cdns_dsi_output *output = &dsi->output;
- unsigned long long dlane_bps;
- unsigned long adj_dsi_htotal;
- unsigned long dsi_htotal;
- unsigned long dpi_htotal;
- unsigned long dpi_hz;
- unsigned int dsi_hfp_ext;
- unsigned int lanes = output->dev->lanes;
-
- dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
-
- dsi_htotal += dsi_cfg->hact;
- dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp + dpi_hsa, bpp,
+ DSI_HBP_FRAME_EVENT_OVERHEAD);
- /*
- * Make sure DSI htotal is aligned on a lane boundary when calculating
- * the expected data rate. This is done by extending HFP in case of
- * misalignment.
- */
- adj_dsi_htotal = dsi_htotal;
- if (dsi_htotal % lanes)
- adj_dsi_htotal += lanes - (dsi_htotal % lanes);
+ dsi_cfg->hsa = 0;
+ }
- dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000;
- dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal;
+ dsi_cfg->hact = dpi_to_dsi_timing(dpi_hact, bpp, 0);
- /* data rate in bytes/sec is not an integer, refuse the mode. */
- dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal;
- if (do_div(dlane_bps, lanes * dpi_htotal))
- return -EINVAL;
+ dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
- /* data rate was in bytes/sec, convert to bits/sec. */
- phy_cfg->hs_clk_rate = dlane_bps * 8;
+ dsi_cfg->htotal = dsi_cfg->hact + dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
- dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
- dsi_cfg->hfp += dsi_hfp_ext;
- dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext;
+ if (sync_pulse) {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_PULSE_OVERHEAD;
+ dsi_cfg->htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_EVENT_OVERHEAD;
+ }
return 0;
}
static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
- unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
int ret;
- ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ ret = cdns_dsi_mode2cfg(dsi, vm, dsi_cfg);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
-
- ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
+ ret = phy_mipi_dphy_get_default_config(vm->pixelclock,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
if (ret)
return ret;
@@ -586,25 +540,11 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
if (ret)
return ret;
- dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
-
- /*
- * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
- * is empty before we start a receiving a new line on the DPI
- * interface.
- */
- if ((u64)phy_cfg->hs_clk_rate *
- mode_to_dpi_hfp(mode, mode_valid_check) * nlanes <
- (u64)dsi_hss_hsa_hse_hbp *
- (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
- return -EINVAL;
-
return 0;
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +557,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -629,8 +569,7 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
- struct cdns_dsi_cfg dsi_cfg;
- int bpp, ret;
+ int bpp;
/*
* VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
@@ -648,19 +587,31 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
if ((mode->hdisplay * bpp) % 32)
return MODE_H_ILLEGAL;
- ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
- if (ret)
- return MODE_BAD;
-
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
u32 val;
+ /*
+ * The cdns-dsi controller needs to be disabled after its DPI source
+ * has stopped streaming. If this is not followed, there is a brief
+ * window before DPI source is disabled and after cdns-dsi controller
+ * has been disabled where the DPI stream is still on, but the cdns-dsi
+ * controller is not ready anymore to accept the incoming signals. This
+ * is one of the reasons why a shift in pixel colors is observed on
+ * displays that have cdns-dsi as one of the bridges.
+ *
+ * To mitigate this, disable this bridge from the bridge post_disable()
+ * hook, instead of the bridge _disable() hook. The bridge post_disable()
+ * hook gets called after the CRTC disable, where often many DPI sources
+ * disable their streams.
+ */
+
val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
DISP_EOT_GEN);
@@ -672,13 +623,10 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
if (dsi->platform_ops && dsi->platform_ops->disable)
dsi->platform_ops->disable(dsi);
- pm_runtime_put(dsi->base.dev);
-}
-
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
pm_runtime_put(dsi->base.dev);
}
@@ -752,31 +700,73 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
+ /*
+ * The cdns-dsi controller needs to be enabled before its DPI source
+ * has begun streaming. If this is not followed, there is a brief window
+ * after DPI source enable and before cdns-dsi controller enable where
+ * the DPI stream is on, but the cdns-dsi controller is not ready to
+ * accept the incoming signals. This is one of the reasons why a shift
+ * in pixel colors is observed on displays that have cdns-dsi as one of
+ * the bridges.
+ *
+ * To mitigate this, enable this bridge from the bridge pre_enable()
+ * hook, instead of the bridge _enable() hook. The bridge pre_enable()
+ * hook gets called before the CRTC enable, where often many DPI sources
+ * enable their streams.
+ */
+
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
- cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
+
+ /*
+ * Now that the DSI Link and DSI Phy are initialized,
+ * wait for the CLK and Data Lanes to be ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
@@ -812,7 +802,13 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
phy_cfg->hs_clk_rate);
- reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
+
+ /*
+ * Estimated time [in clock cycles] to perform LP->HS on D-PHY.
+ * It is not clear how to calculate this, so for now,
+ * set it to 1/10 of the total number of clocks in a line.
+ */
+ reg_wakeup = dsi_cfg.htotal / nlanes / 10;
writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
dsi->regs + VID_DPHY_TIME);
@@ -892,25 +888,151 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
- if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
- return;
+ *num_input_fmts = 0;
- cdns_dsi_init_link(dsi);
- cdns_dsi_hs_init(dsi);
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0])
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static long cdns_dsi_round_pclk(struct cdns_dsi *dsi, unsigned long pclk)
+{
+ struct cdns_dsi_output *output = &dsi->output;
+ unsigned int nlanes = output->dev->lanes;
+ union phy_configure_opts phy_opts = { 0 };
+ u32 bitspp;
+ int ret;
+
+ bitspp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+
+ ret = phy_mipi_dphy_get_default_config(pclk, bitspp, nlanes,
+ &phy_opts.mipi_dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &phy_opts);
+ if (ret)
+ return ret;
+
+ return div_u64((u64)phy_opts.mipi_dphy.hs_clk_rate * nlanes, bitspp);
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+ struct videomode vm;
+ long pclk;
+
+ /* cdns-dsi requires negative syncs */
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ /*
+ * The DPHY PLL has quite coarse-grained clock rate options. See
+ * what hsclk rate we can achieve based on the pixel clock, convert it
+ * back to pixel clock, set that to the adjusted_mode->clock. This is
+ * all in hopes that the CRTC will be able to provide us the requested
+ * clock, as otherwise the DPI and DSI clocks will be out of sync.
+ */
+
+ pclk = cdns_dsi_round_pclk(dsi, adjusted_mode->clock * 1000);
+ if (pclk < 0)
+ return (int)pclk;
+
+ adjusted_mode->clock = pclk / 1000;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ return cdns_dsi_check_conf(dsi, &vm, dsi_cfg);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ memset(dsi_state, 0, sizeof(*dsi_state));
+ dsi_state->base.bridge = bridge;
+
+ return &dsi_state->base;
}
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1042,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -932,33 +1052,13 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
if (output->dev)
return -EBUSY;
- /* We do not support burst mode yet. */
- if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- return -ENOTSUPP;
-
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1068,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1083,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1248,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
@@ -1166,9 +1261,10 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
int ret, irq;
u32 val;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct cdns_dsi, input.bridge,
+ &cdns_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
@@ -1226,7 +1322,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
* CDNS_DPI_INPUT.
*/
input->id = CDNS_DPI_INPUT;
- input->bridge.funcs = &cdns_dsi_bridge_funcs;
input->bridge.of_node = pdev->dev.of_node;
/* Mask all interrupts before registering the IRQ handler. */
@@ -1313,4 +1408,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
-
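
cdns-dsi now carries its precomputed DSI timing configuration in a subclassed bridge state, using the duplicate/destroy/reset trio shown above. The general shape of that pattern, reduced to a sketch with placeholder names and a placeholder payload field:

#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>

struct my_bridge_state {
	struct drm_bridge_state base;
	u32 computed_cfg;		/* driver-specific per-commit data */
};

static struct drm_bridge_state *my_duplicate_state(struct drm_bridge *bridge)
{
	struct my_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
	/* copy the driver-specific part from the current state if needed */

	return &state->base;
}

static void my_destroy_state(struct drm_bridge *bridge,
			     struct drm_bridge_state *state)
{
	kfree(container_of(state, struct my_bridge_state, base));
}

static struct drm_bridge_state *my_reset_state(struct drm_bridge *bridge)
{
	struct my_bridge_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* Mirrors the driver above: only the back-pointer is initialised. */
	state->base.bridge = bridge;
	return &state->base;
}
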
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index d081850e3c03..38726ae1bf15 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1619,7 +1549,7 @@ bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -1979,10 +1910,9 @@ static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
}
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct cdns_mhdp_bridge_state *mhdp_state;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
@@ -2054,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
@@ -2070,7 +2002,7 @@ out:
}
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
u32 resp;
@@ -2213,7 +2145,8 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2306,7 +2239,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
@@ -2459,13 +2392,14 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
int ret;
int irq;
- mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
- if (!mhdp)
- return -ENOMEM;
+ mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
+ &cdns_mhdp_bridge_funcs);
+ if (IS_ERR(mhdp))
+ return PTR_ERR(mhdp);
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
+ dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
@@ -2504,14 +2438,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->info = of_device_get_match_data(dev);
- clk_prepare_enable(clk);
-
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
- goto clk_disable;
+ return ret;
}
if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
@@ -2553,7 +2485,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->display_fmt.bpc = 8;
mhdp->bridge.of_node = pdev->dev.of_node;
- mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -2590,8 +2521,6 @@ plat_fini:
runtime_put:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-clk_disable:
- clk_disable_unprepare(mhdp->clk);
return ret;
}
@@ -2632,8 +2561,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev)
cancel_work_sync(&mhdp->modeset_retry_work);
flush_work(&mhdp->hpd_work);
/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
-
- clk_disable_unprepare(mhdp->clk);
}
static const struct of_device_id mhdp_ids[] = {
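
Besides switching to the shared DPCD power helpers, the mhdp8546 probe path moves to devm_clk_get_enabled(), which drops the explicit clk_prepare_enable()/clk_disable_unprepare() pairs and the associated error-path label. Roughly, assuming a driver with a single unnamed clock:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_get_clock(struct device *dev, struct clk **clk)
{
	/* Gets, prepares and enables the clock; devres undoes all of it. */
	*clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "couldn't get and enable clk\n");

	return 0;
}
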
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index d47703559b0d..814713c5bea9 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -341,10 +341,9 @@ static void chipone_configure_pll(struct chipone *icn,
}
static void chipone_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_display_mode *mode = &icn->mode;
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
@@ -445,7 +444,7 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
@@ -482,7 +481,7 @@ static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
@@ -581,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -690,9 +691,10 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
struct chipone *icn;
int ret;
- icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
- if (!icn)
- return -ENOMEM;
+ icn = devm_drm_bridge_alloc(dev, struct chipone, bridge,
+ &chipone_bridge_funcs);
+ if (IS_ERR(icn))
+ return PTR_ERR(icn);
icn->dev = dev;
@@ -700,7 +702,6 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
if (ret)
return ret;
- icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..54d49d4882c8 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -215,7 +215,7 @@ static enum drm_connector_status ch7033_connector_detect(
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- return drm_bridge_detect(priv->next_bridge);
+ return drm_bridge_detect(priv->next_bridge, connector);
}
static const struct drm_connector_funcs ch7033_connector_funcs = {
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
@@ -535,9 +536,10 @@ static int ch7033_probe(struct i2c_client *client)
unsigned int val;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct ch7033_priv, bridge,
+ &ch7033_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -574,7 +576,6 @@ static int ch7033_probe(struct i2c_client *client)
}
INIT_LIST_HEAD(&priv->bridge.list);
- priv->bridge.funcs = &ch7033_bridge_funcs;
priv->bridge.of_node = dev->of_node;
drm_bridge_add(&priv->bridge);
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index c8abd9920fee..a35dae9b56e2 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -103,9 +103,10 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
u8 buffer[4];
int ret;
- anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
- if (!anx7688)
- return -ENOMEM;
+ anx7688 = devm_drm_bridge_alloc(dev, struct cros_ec_anx7688, bridge,
+ &cros_ec_anx7688_bridge_funcs);
+ if (IS_ERR(anx7688))
+ return PTR_ERR(anx7688);
anx7688->client = client;
i2c_set_clientdata(client, anx7688);
@@ -153,7 +154,6 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
fw_version);
- anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
drm_bridge_add(&anx7688->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..e9f16dbc9535 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,13 +34,13 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
-static enum drm_connector_status
-display_connector_detect(struct drm_bridge *bridge)
+static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge)
{
struct display_connector *conn = to_display_connector(bridge);
@@ -81,6 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
+static enum drm_connector_status
+display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return display_connector_detect(bridge);
+}
+
static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -102,7 +108,7 @@ static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
@@ -145,7 +151,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
@@ -171,7 +177,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
- .detect = display_connector_detect,
+ .detect = display_connector_bridge_detect,
.edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
@@ -209,9 +215,10 @@ static int display_connector_probe(struct platform_device *pdev)
const char *label = NULL;
int ret;
- conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
- if (!conn)
- return -ENOMEM;
+ conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
+ &display_connector_bridge_funcs);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
platform_set_drvdata(pdev, conn);
@@ -361,13 +368,13 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
- conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
if (conn->bridge.ddc)
conn->bridge.ops |= DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_DETECT;
- if (conn->hpd_gpio)
+ /* Detecting the monitor requires reading DPCD */
+ if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
if (conn->hpd_irq >= 0)
conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 0fc8a14fd800..5c3cf37200bc 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,19 +113,19 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -181,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
@@ -224,7 +224,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
}
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
@@ -298,16 +298,15 @@ static int fsl_ldb_probe(struct platform_device *pdev)
struct fsl_ldb *fsl_ldb;
int dual_link;
- fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
- if (!fsl_ldb)
- return -ENOMEM;
+ fsl_ldb = devm_drm_bridge_alloc(dev, struct fsl_ldb, bridge, &funcs);
+ if (IS_ERR(fsl_ldb))
+ return PTR_ERR(fsl_ldb);
fsl_ldb->devdata = of_device_get_match_data(dev);
if (!fsl_ldb->devdata)
return -EINVAL;
fsl_ldb->dev = &pdev->dev;
- fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
fsl_ldb->clk = devm_clk_get(dev, "ldb");
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 9a480c6abb85..b9028a5e5a06 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
+ imply DRM_IMX8MP_HDMI_PAI
imply DRM_IMX8MP_HDMI_PVI
imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
+config DRM_IMX8MP_HDMI_PAI
+ tristate "Freescale i.MX8MP HDMI PAI bridge support"
+ depends on OF
+ select DRM_DW_HDMI
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Choose this to enable support for the internal HDMI TX Parallel
+ Audio Interface found on the Freescale i.MX8MP SoC.
+
config DRM_IMX8MP_HDMI_PVI
tristate "Freescale i.MX8MP HDMI PVI bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index dd5d48584806..8d01fda25451 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o
obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
+obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..6149ba141a38 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
@@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs)
+void ldb_add_bridge_helper(struct ldb *ldb)
{
struct ldb_channel *ldb_ch;
int i;
@@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
continue;
ldb_ch->bridge.driver_private = ldb_ch;
- ldb_ch->bridge.funcs = bridge_funcs;
ldb_ch->bridge.of_node = ldb_ch->np;
drm_bridge_add(&ldb_ch->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..de187e326999 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
int ldb_find_next_bridge_helper(struct ldb *ldb);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs);
+void ldb_add_bridge_helper(struct ldb *ldb);
void ldb_remove_bridge_helper(struct ldb *ldb);
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..0e31d5000e7c 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -5,6 +5,8 @@
* bridge driver for legacy DT bindings, utilizing display-timings node
*/
+#include <linux/export.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
@@ -23,7 +25,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -58,9 +61,10 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
struct imx_legacy_bridge *imx_bridge;
int ret;
- imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
- if (!imx_bridge)
- return ERR_PTR(-ENOMEM);
+ imx_bridge = devm_drm_bridge_alloc(dev, struct imx_legacy_bridge,
+ base, &imx_legacy_bridge_funcs);
+ if (IS_ERR(imx_bridge))
+ return ERR_CAST(imx_bridge);
ret = of_get_drm_display_mode(np,
&imx_bridge->mode,
@@ -71,14 +75,13 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
- imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
imx_bridge->base.of_node = np;
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
new file mode 100644
index 000000000000..8d13a35b206a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <sound/asoundef.h>
+
+#define HTX_PAI_CTRL 0x00
+#define ENABLE BIT(0)
+
+#define HTX_PAI_CTRL_EXT 0x04
+#define WTMK_HIGH_MASK GENMASK(31, 24)
+#define WTMK_LOW_MASK GENMASK(23, 16)
+#define NUM_CH_MASK GENMASK(10, 8)
+#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n))
+#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n))
+#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1)
+
+#define HTX_PAI_FIELD_CTRL 0x08
+#define PRE_SEL GENMASK(28, 24)
+#define D_SEL GENMASK(23, 20)
+#define V_SEL GENMASK(19, 15)
+#define U_SEL GENMASK(14, 10)
+#define C_SEL GENMASK(9, 5)
+#define P_SEL GENMASK(4, 0)
+
+struct imx8mp_hdmi_pai {
+ struct regmap *regmap;
+};
+
+static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
+ int width, int rate, int non_pcm,
+ int iec958)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+ int val;
+
+ /* PAI set control extended */
+ val = WTMK_HIGH(3) | WTMK_LOW(3);
+ val |= NUM_CH(channel);
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val);
+
+ /* IEC60958 format */
+ if (iec958) {
+ val = FIELD_PREP_CONST(P_SEL,
+ __bf_shf(IEC958_SUBFRAME_PARITY));
+ val |= FIELD_PREP_CONST(C_SEL,
+ __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS));
+ val |= FIELD_PREP_CONST(U_SEL,
+ __bf_shf(IEC958_SUBFRAME_USER_DATA));
+ val |= FIELD_PREP_CONST(V_SEL,
+ __bf_shf(IEC958_SUBFRAME_VALIDITY));
+ val |= FIELD_PREP_CONST(D_SEL,
+ __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK));
+ val |= FIELD_PREP_CONST(PRE_SEL,
+ __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK));
+ } else {
+ /*
+ * The allowed PCM widths are 24-bit and 32-bit, as supported by the
+ * aud2htx module.
+ * For 24-bit, D_SEL = 0 selects all the bits.
+ * For 32-bit, D_SEL = 8 selects the 24 most significant bits.
+ */
+ val = FIELD_PREP(D_SEL, width - 24);
+ }
+
+ regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val);
+
+ /* PAI start running */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE);
+}
+
+static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+
+ /* Stop PAI */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
+}
+
+static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = HTX_PAI_FIELD_CTRL,
+};
+
+static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_plat_data *plat_data = data;
+ struct imx8mp_hdmi_pai *hdmi_pai;
+ struct resource *res;
+ void __iomem *base;
+
+ hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
+ if (!hdmi_pai)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base,
+ &imx8mp_hdmi_pai_regmap_config);
+ if (IS_ERR(hdmi_pai->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(hdmi_pai->regmap);
+ }
+
+ plat_data->enable_audio = imx8mp_hdmi_pai_enable;
+ plat_data->disable_audio = imx8mp_hdmi_pai_disable;
+ plat_data->priv_audio = hdmi_pai;
+
+ return 0;
+}
+
+static const struct component_ops imx8mp_hdmi_pai_ops = {
+ .bind = imx8mp_hdmi_pai_bind,
+};
+
+static int imx8mp_hdmi_pai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static void imx8mp_hdmi_pai_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static const struct of_device_id imx8mp_hdmi_pai_of_table[] = {
+ { .compatible = "fsl,imx8mp-hdmi-pai" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table);
+
+static struct platform_driver imx8mp_hdmi_pai_platform_driver = {
+ .probe = imx8mp_hdmi_pai_probe,
+ .remove = imx8mp_hdmi_pai_remove,
+ .driver = {
+ .name = "imx8mp-hdmi-pai",
+ .of_match_table = imx8mp_hdmi_pai_of_table,
+ },
+};
+module_platform_driver(imx8mp_hdmi_pai_platform_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver");
+MODULE_LICENSE("GPL");
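Note: two points in the new PAI driver are worth spelling out. First, it does not register a bridge itself: it binds as a component against the i.MX8MP dw-hdmi glue (see the imx8mp-hdmi-tx.c changes below) and only fills the enable_audio/disable_audio/priv_audio fields of dw_hdmi_plat_data. Second, in the IEC60958 path each *_SEL field of HTX_PAI_FIELD_CTRL is programmed with the bit position of the corresponding flag inside the 32-bit IEC958 subframe, obtained with __bf_shf(), which evaluates to the shift of a mask's lowest set bit. A tiny sketch of that helper with illustrative masks only (EXAMPLE_* names are hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* __bf_shf(mask) is the shift of the mask's lowest set bit. */
#define EXAMPLE_FLAG	BIT(28)		/* __bf_shf(EXAMPLE_FLAG)  == 28 */
#define EXAMPLE_FIELD	GENMASK(19, 15)	/* __bf_shf(EXAMPLE_FIELD) == 15 */

/* Program a selector field with a flag's bit position, as the driver does. */
static inline u32 example_sel_value(void)
{
	return FIELD_PREP(EXAMPLE_FIELD, __bf_shf(EXAMPLE_FLAG));
}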
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 0d1ac3edcab4..3a6f8587a257 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,25 +40,27 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = bridge_state->base.state;
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
struct drm_connector_state *conn_state;
+ struct drm_bridge_state *bridge_state;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
u32 bus_flags = 0, val;
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
conn_state = drm_atomic_get_new_connector_state(state, connector);
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -88,7 +90,7 @@ static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
}
static void imx8mp_hdmi_pvi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
@@ -138,9 +140,10 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
struct device_node *remote;
struct imx8mp_hdmi_pvi *pvi;
- pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
- if (!pvi)
- return -ENOMEM;
+ pvi = devm_drm_bridge_alloc(&pdev->dev, struct imx8mp_hdmi_pvi,
+ bridge, &imx_hdmi_pvi_bridge_funcs);
+ if (IS_ERR(pvi))
+ return PTR_ERR(pvi);
platform_set_drvdata(pdev, pvi);
pvi->dev = &pdev->dev;
@@ -164,7 +167,6 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Register the bridge. */
- pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
pvi->bridge.of_node = pdev->dev.of_node;
pvi->bridge.timings = pvi->next_bridge->timings;
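Note: the PVI hunks above also show the second signature migration running through this series: the atomic enable/disable hooks now receive the full drm_atomic_state instead of the old bridge state, and drivers that still need their bridge state look it up with drm_atomic_get_new_bridge_state(). A minimal before/after sketch, assuming only the helpers already used above (my_bridge naming is hypothetical):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

/* Old shape: the global atomic state was reached through the bridge state. */
static void my_bridge_atomic_enable_old(struct drm_bridge *bridge,
					struct drm_bridge_state *old_bridge_state)
{
	struct drm_atomic_state *state = old_bridge_state->base.state;

	/* ... program the hardware ... */
}

/* New shape: the atomic state is passed directly; look up what is needed. */
static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state =
		drm_atomic_get_new_bridge_state(state, bridge);
	struct drm_connector *connector =
		drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);

	/* bridge_state->input_bus_cfg, connector state, etc. remain available. */
}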
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 1e7a789ec289..32fd3554e267 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -5,11 +5,13 @@
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
+#include <drm/drm_of.h>
struct imx8mp_hdmi {
struct dw_hdmi_plat_data plat_data;
@@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
.update_hpd = dw_hdmi_phy_update_hpd,
};
+static int imx8mp_dw_hdmi_bind(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = component_bind_all(dev, &hdmi->plat_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "component_bind_all failed!\n");
+
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data);
+ if (IS_ERR(hdmi->dw_hdmi)) {
+ component_unbind_all(dev, &hdmi->plat_data);
+ return PTR_ERR(hdmi->dw_hdmi);
+ }
+
+ return 0;
+}
+
+static void imx8mp_dw_hdmi_unbind(struct device *dev)
+{
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_remove(hdmi->dw_hdmi);
+
+ component_unbind_all(dev, &hdmi->plat_data);
+}
+
+static const struct component_master_ops imx8mp_dw_hdmi_ops = {
+ .bind = imx8mp_dw_hdmi_bind,
+ .unbind = imx8mp_dw_hdmi_unbind,
+};
+
static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_hdmi_plat_data *plat_data;
+ struct component_match *match = NULL;
+ struct device_node *remote;
struct imx8mp_hdmi *hdmi;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
@@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
plat_data->priv_data = hdmi;
plat_data->phy_force_vendor = true;
- hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
- if (IS_ERR(hdmi->dw_hdmi))
- return PTR_ERR(hdmi->dw_hdmi);
-
platform_set_drvdata(pdev, hdmi);
+ /* port@2 is for the HDMI PAI device */
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (!remote) {
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi->dw_hdmi))
+ return PTR_ERR(hdmi->dw_hdmi);
+ } else {
+ drm_of_component_match_add(dev, &match, component_compare_of, remote);
+
+ of_node_put(remote);
+
+ return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match);
+ }
+
return 0;
}
static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct device_node *remote;
- dw_hdmi_remove(hdmi->dw_hdmi);
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (remote) {
+ of_node_put(remote);
+
+ component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops);
+ } else {
+ dw_hdmi_remove(hdmi->dw_hdmi);
+ }
}
static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index dd5823f04c70..47aa65938e6a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
struct imx8qm_ldb {
struct ldb base;
struct device *dev;
- struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
int active_chno;
@@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
@@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
@@ -200,9 +200,8 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
CH_HSYNC_M(chno), CH_PHSYNC(chno));
}
-static void
-imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -227,13 +226,13 @@ imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
}
if (is_split) {
- ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel0 PHY: %d\n",
ret);
- ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel1 PHY: %d\n",
@@ -247,9 +246,8 @@ imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
ldb_bridge_enable_helper(bridge);
}
-static void
-imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -263,12 +261,12 @@ imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
ldb_bridge_disable_helper(bridge);
if (is_split) {
- ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel0 PHY: %d\n",
ret);
- ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel1 PHY: %d\n",
@@ -414,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
int i, ret;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (!ldb_ch->is_available)
@@ -450,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
if (!imx8qm_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
+ &imx8qm_ldb_bridge_funcs);
+ if (IS_ERR(imx8qm_ldb->channel[i]))
+ return PTR_ERR(imx8qm_ldb->channel[i]);
+ }
+
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
@@ -475,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -501,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
}
imx8qm_ldb->active_chno = 0;
- imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[0];
ldb_ch = &imx8qm_ldb_ch->base;
ldb_ch->link_type = pixel_order;
} else {
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -527,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qm_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 7bce2305d676..122502968927 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
struct imx8qxp_ldb {
struct ldb base;
struct device *dev;
- struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
struct drm_bridge *companion;
@@ -203,9 +203,8 @@ imx8qxp_ldb_bridge_mode_set(struct drm_bridge *bridge,
companion->funcs->mode_set(companion, mode, adjusted_mode);
}
-static void
-imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -217,12 +216,11 @@ imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
clk_prepare_enable(imx8qxp_ldb->clk_bypass);
if (is_split && companion)
- companion->funcs->atomic_pre_enable(companion, old_bridge_state);
+ companion->funcs->atomic_pre_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -252,12 +250,11 @@ imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
if (is_split && companion)
- companion->funcs->atomic_enable(companion, old_bridge_state);
+ companion->funcs->atomic_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -283,7 +280,7 @@ imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
clk_disable_unprepare(imx8qxp_ldb->clk_pixel);
if (is_split && companion)
- companion->funcs->atomic_disable(companion, old_bridge_state);
+ companion->funcs->atomic_disable(companion, state);
ret = pm_runtime_put(dev);
if (ret < 0)
@@ -413,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct device_node *ep, *remote;
struct device *dev = imx8qxp_ldb->dev;
@@ -459,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct ldb_channel *companion_ldb_ch;
struct device_node *companion;
@@ -589,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
if (!imx8qxp_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
+ &imx8qxp_ldb_bridge_funcs);
+ if (IS_ERR(imx8qxp_ldb->channel[i]))
+ return PTR_ERR(imx8qxp_ldb->channel[i]);
+ }
+
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
@@ -614,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -630,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
ldb_ch = &imx8qxp_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -663,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qxp_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
@@ -678,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int imx8qxp_ldb_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
@@ -695,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
- RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1812bd106261..8517b1c953d4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -63,12 +63,11 @@ struct imx8qxp_pc_channel {
struct drm_bridge *next_bridge;
struct imx8qxp_pc *pc;
unsigned int stream_id;
- bool is_available;
};
struct imx8qxp_pc {
struct device *dev;
- struct imx8qxp_pc_channel ch[2];
+ struct imx8qxp_pc_channel *ch[2];
struct clk *clk_apb;
void __iomem *base;
};
@@ -108,6 +107,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +119,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -176,9 +176,8 @@ imx8qxp_pc_bridge_mode_set(struct drm_bridge *bridge,
clk_disable_unprepare(pc->clk_apb);
}
-static void
-imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
struct imx8qxp_pc *pc = ch->pc;
@@ -307,7 +306,14 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
goto free_child;
}
- ch = &pc->ch[i];
+ ch = devm_drm_bridge_alloc(dev, struct imx8qxp_pc_channel, bridge,
+ &imx8qxp_pc_bridge_funcs);
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ goto free_child;
+ }
+
+ pc->ch[i] = ch;
ch->pc = pc;
ch->stream_id = i;
@@ -333,9 +339,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
of_node_put(remote);
ch->bridge.driver_private = ch;
- ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
ch->bridge.of_node = child;
- ch->is_available = true;
drm_bridge_add(&ch->bridge);
}
@@ -345,8 +349,8 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0].next_bridge)
- drm_bridge_remove(&pc->ch[0].bridge);
+ if (i == 1 && pc->ch[0]->next_bridge)
+ drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
return ret;
@@ -359,13 +363,10 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
int i;
for (i = 0; i < 2; i++) {
- ch = &pc->ch[i];
-
- if (!ch->is_available)
- continue;
+ ch = pc->ch[i];
- drm_bridge_remove(&ch->bridge);
- ch->is_available = false;
+ if (ch)
+ drm_bridge_remove(&ch->bridge);
}
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 4b0715ed6f38..e5943506981d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -153,9 +154,8 @@ imx8qxp_pixel_link_bridge_mode_set(struct drm_bridge *bridge,
imx8qxp_pixel_link_set_mst_addr(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -164,9 +164,8 @@ imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
imx8qxp_pixel_link_enable_sync(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -328,9 +327,10 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return -ENOMEM;
+ pl = devm_drm_bridge_alloc(dev, struct imx8qxp_pixel_link, bridge,
+ &imx8qxp_pixel_link_bridge_funcs);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
ret = imx_scu_get_handle(&pl->ipc_handle);
if (ret) {
@@ -385,7 +385,6 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pl);
pl->bridge.driver_private = pl;
- pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
pl->bridge.of_node = np;
drm_bridge_add(&pl->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 65cf3a6c8ec6..111310acab2c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
@@ -122,9 +123,8 @@ imx8qxp_pxl2dpi_bridge_mode_set(struct drm_bridge *bridge,
}
}
-static void
-imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
int ret;
@@ -134,8 +134,7 @@ imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_ERROR(p2d->dev, "failed to put runtime PM: %d\n", ret);
if (p2d->companion)
- p2d->companion->funcs->atomic_disable(p2d->companion,
- old_bridge_state);
+ p2d->companion->funcs->atomic_disable(p2d->companion, state);
}
static const u32 imx8qxp_pxl2dpi_bus_output_fmts[] = {
@@ -393,9 +392,10 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
- if (!p2d)
- return -ENOMEM;
+ p2d = devm_drm_bridge_alloc(dev, struct imx8qxp_pxl2dpi, bridge,
+ &imx8qxp_pxl2dpi_bridge_funcs);
+ if (IS_ERR(p2d))
+ return PTR_ERR(p2d);
p2d->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(p2d->regmap)) {
@@ -442,7 +442,6 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
p2d->bridge.driver_private = p2d;
- p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
p2d->bridge.of_node = np;
drm_bridge_add(&p2d->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index bea8346515b8..8f7a0d46601a 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -492,14 +492,12 @@ static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi,
static enum drm_mode_status
imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode)
{
- struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(dmd_bridge->encoder);
- /* Get the last bridge */
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
-
- if ((bridge->ops & DRM_BRIDGE_OP_DETECT) &&
- (bridge->ops & DRM_BRIDGE_OP_EDID)) {
+ if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) &&
+ (last_bridge->ops & DRM_BRIDGE_OP_EDID)) {
unsigned long pixel_clock_rate = mode->clock * 1000;
unsigned long rounded_rate;
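Note: the loop that walked to the end of the bridge chain is replaced by drm_bridge_chain_get_last_bridge(), and the returned reference is released automatically through the __free(drm_bridge_put) cleanup attribute, the same scoped-reference idiom used for prev_bridge in display-connector.c earlier in this series. A minimal sketch of the idiom, assuming only the helpers used above (the example_* name is hypothetical):

#include <linux/cleanup.h>
#include <drm/drm_bridge.h>

/* The reference taken by the chain helper is dropped when 'last' leaves scope. */
static bool example_last_bridge_can_detect(struct drm_encoder *encoder)
{
	struct drm_bridge *last __free(drm_bridge_put) =
		drm_bridge_chain_get_last_bridge(encoder);

	return last && (last->ops & DRM_BRIDGE_OP_DETECT);
}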
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 45af49a1e90f..2eb8fba7016c 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -146,6 +146,7 @@
#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4)
#define HDMI_REG_PKT_GENERAL_CTRL 0xc6
+#define HDMI_REG_PKT_NULL_CTRL 0xc9
#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd
#define ENABLE_PKT BIT(0)
#define REPEAT_PKT BIT(1)
@@ -154,6 +155,12 @@
* 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers)
*/
+/* NULL packet registers */
+/* Header Byte (HB): n = 0 ~ 2 */
+#define HDMI_REG_PKT_HB(n) (0x138 + (n))
+/* Packet Byte (PB): n = 0 ~ 27 (HDMI_MAX_INFOFRAME_SIZE), n = 0 is the checksum */
+#define HDMI_REG_PKT_PB(n) (0x13b + (n))
+
/* AVI packet registers */
#define HDMI_REG_AVI_DB1 0x158
#define HDMI_REG_AVI_DB2 0x159
@@ -224,7 +231,9 @@ static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg)
case HDMI_REG_HDMI_MODE:
case HDMI_REG_GCP:
case HDMI_REG_PKT_GENERAL_CTRL:
+ case HDMI_REG_PKT_NULL_CTRL:
case HDMI_REG_AVI_INFOFRM_CTRL:
+ case HDMI_REG_PKT_HB(0) ... HDMI_REG_PKT_PB(HDMI_MAX_INFOFRAME_SIZE):
case HDMI_REG_AVI_DB1:
case HDMI_REG_AVI_DB2:
case HDMI_REG_AVI_DB3:
@@ -569,9 +578,8 @@ static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static void
-it6263_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -581,11 +589,9 @@ it6263_bridge_atomic_disable(struct drm_bridge *bridge,
AFE_DRV_RST | AFE_DRV_PWD);
}
-static void
-it6263_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct it6263 *it = bridge_to_it6263(bridge);
const struct drm_crtc_state *crtc_state;
struct regmap *regmap = it->hdmi_regmap;
@@ -668,13 +674,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -682,7 +689,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -690,12 +697,13 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
-static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -756,10 +764,16 @@ static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge,
{
struct it6263 *it = bridge_to_it6263(bridge);
- if (type == HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0);
- else
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ regmap_write(it->hdmi_regmap, HDMI_REG_PKT_NULL_CTRL, 0);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+ }
return 0;
}
@@ -771,27 +785,36 @@ static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge,
struct it6263 *it = bridge_to_it6263(bridge);
struct regmap *regmap = it->hdmi_regmap;
- if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* write the first AVI infoframe data byte chunk(DB1-DB5) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_DB_CHUNK1_SIZE);
+
+ /* write the second AVI infoframe data byte chunk(DB6-DB13) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE +
+ HDMI_AVI_DB_CHUNK1_SIZE],
+ HDMI_AVI_DB_CHUNK2_SIZE);
+
+ /* write checksum */
+ regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
+
+ regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ /* write header and payload */
+ regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buffer, len);
+
+ regmap_write(regmap, HDMI_REG_PKT_NULL_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
- return 0;
}
- /* write the first AVI infoframe data byte chunk(DB1-DB5) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE],
- HDMI_AVI_DB_CHUNK1_SIZE);
-
- /* write the second AVI infoframe data byte chunk(DB6-DB13) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE +
- HDMI_AVI_DB_CHUNK1_SIZE],
- HDMI_AVI_DB_CHUNK2_SIZE);
-
- /* write checksum */
- regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
-
- regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT);
-
return 0;
}
@@ -818,9 +841,10 @@ static int it6263_probe(struct i2c_client *client)
struct it6263 *it;
int ret;
- it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
- if (!it)
- return -ENOMEM;
+ it = devm_drm_bridge_alloc(dev, struct it6263, bridge,
+ &it6263_bridge_funcs);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
it->dev = dev;
it->hdmi_i2c = client;
@@ -854,8 +878,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
@@ -868,7 +892,6 @@ static int it6263_probe(struct i2c_client *client)
i2c_set_clientdata(client, it);
- it->bridge.funcs = &it6263_bridge_funcs;
it->bridge.of_node = dev->of_node;
/* IT6263 chip doesn't support HPD interrupt. */
it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 88ef76a37fe6..a094803ba7aa 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -21,7 +21,7 @@
#include <linux/wait.h>
#include <linux/bitfield.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2141,35 +2107,6 @@ static void it6505_hdcp_part1_auth(struct it6505 *it6505)
it6505->hdcp_status = HDCP_AUTH_GOING;
}
-static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
- unsigned int size, u8 *output_av)
-{
- struct shash_desc *desc;
- struct crypto_shash *tfm;
- int err;
- struct device *dev = it6505->dev;
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "crypto_alloc_shash sha1 failed");
- return PTR_ERR(tfm);
- }
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- crypto_free_shash(tfm);
- return -ENOMEM;
- }
-
- desc->tfm = tfm;
- err = crypto_shash_digest(desc, sha1_input, size, output_av);
- if (err)
- dev_err(dev, "crypto_shash_digest sha1 failed");
-
- crypto_free_shash(tfm);
- kfree(desc);
- return err;
-}
-
static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
@@ -2239,7 +2176,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
return false;
}
- it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ sha1(it6505->sha1_input, i, (u8 *)av);
/*1B-05 V' must retry 3 times */
for (retry = 0; retry < 3; retry++) {
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
@@ -2250,12 +2187,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
continue;
}
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 5; i++)
if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- av[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
break;
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ if (i == 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry);
return true;
}
}
@@ -2577,8 +2515,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2909,8 +2846,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3123,6 +3059,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3182,11 +3119,10 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
- struct drm_atomic_state *state = old_state->base.state;
struct hdmi_avi_infoframe frame;
struct drm_crtc_state *crtc_state;
struct drm_connector_state *conn_state;
@@ -3233,12 +3169,11 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3246,14 +3181,13 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3264,7 +3198,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3275,7 +3209,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-it6505_bridge_detect(struct drm_bridge *bridge)
+it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3620,9 +3554,10 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct extcon_dev *extcon;
int err;
- it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
- if (!it6505)
- return -ENOMEM;
+ it6505 = devm_drm_bridge_alloc(&client->dev, struct it6505, bridge,
+ &it6505_bridge_funcs);
+ if (IS_ERR(it6505))
+ return PTR_ERR(it6505);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
@@ -3697,7 +3632,6 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->aux.transfer = it6505_aux_transfer;
drm_dp_aux_init(&it6505->aux);
- it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
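Note: beyond the bridge-API conversion, the it6505 changes drop two local re-implementations in favour of shared helpers: the open-coded DPCD power handling becomes drm_dp_link_power_up()/drm_dp_link_power_down(), which take the AUX channel and the sink's DPCD revision (and, like the removed local helper, skip sinks below DPCD 1.1), and the crypto_shash-based digest becomes the one-shot sha1() library call from <crypto/sha1.h>. A short sketch mirroring the usage in the hunks above (example_* names are hypothetical):

#include <crypto/sha1.h>
#include <drm/display/drm_dp_helper.h>

/* Wake the sink (DP_SET_POWER_D0) before reading the sink count. */
static void example_sink_power_up(struct drm_dp_aux *aux, u8 dpcd_revision)
{
	drm_dp_link_power_up(aux, dpcd_revision);
}

/* One-shot SHA-1 over the assembled KSV list, replacing the crypto_shash path. */
static void example_hash_ksv_list(const u8 *input, size_t len,
				  u8 digest[SHA1_DIGEST_SIZE])
{
	sha1(input, len, digest);
}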
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 23edcde6b9a7..0185f61e6e59 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -287,6 +287,7 @@
enum chip_id {
ID_IT6610,
ID_IT66121,
+ ID_IT66122,
};
struct it66121_chip_info {
@@ -312,7 +313,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
- const struct it66121_chip_info *info;
+ enum chip_id id;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@@ -402,7 +403,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1, 0);
if (ret)
@@ -428,7 +429,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1,
IT66121_AFE_IP_EC1);
@@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT6610) {
+ if (ctx->id == ID_IT6610) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT6610_AFE_XP_BYPASS,
IT6610_AFE_XP_BYPASS);
@@ -586,6 +587,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,11 +596,11 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_RCLK, 0);
if (ret)
@@ -721,10 +723,9 @@ static u32 *it66121_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
}
static void it66121_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
ctx->connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
@@ -732,7 +733,7 @@ static void it66121_bridge_enable(struct drm_bridge *bridge,
}
static void it66121_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- if (ctx->info->id == ID_IT6610) {
+ if (ctx->id == ID_IT6610) {
/* The IT6610 only supports these settings */
bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
@@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
- if (ctx->info->id == ID_IT66121 &&
+ if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK,
IT66121_CLK_BANK_PWROFF_TXCLK)) {
@@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
- if (ctx->info->id == ID_IT66121 &&
+ if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
goto unlock;
@@ -843,7 +844,8 @@ static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -1383,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, true);
if (ret)
@@ -1400,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, false);
if (ret)
@@ -1478,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.no_capture_mute = 1,
};
- dev_dbg(dev, "%s\n", __func__);
-
if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
@@ -1503,22 +1499,30 @@ static const char * const it66121_supplies[] = {
"vcn33", "vcn18", "vrf12"
};
+static const struct it66121_chip_info it66xx_chip_info[] = {
+ {.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 },
+ {.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 },
+ {.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 },
+};
+
static int it66121_probe(struct i2c_client *client)
{
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
- int ret;
+ int ret, i;
struct it66121_ctx *ctx;
struct device *dev = &client->dev;
+ const struct it66121_chip_info *chip_info;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "I2C check functionality failed.\n");
return -ENXIO;
}
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct it66121_ctx, bridge,
+ &it66121_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!ep)
@@ -1526,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client)
ctx->dev = dev;
ctx->client = client;
- ctx->info = i2c_get_match_data(client);
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@@ -1572,12 +1575,18 @@ static int it66121_probe(struct i2c_client *client)
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
- if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
- (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
- return -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) {
+ chip_info = &it66xx_chip_info[i];
+ if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid &&
+ (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) {
+ ctx->id = chip_info->id;
+ break;
+ }
}
- ctx->bridge.funcs = &it66121_bridge_funcs;
+ if (i == ARRAY_SIZE(it66xx_chip_info))
+ return -ENODEV;
+
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@@ -1611,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client)
mutex_destroy(&ctx->lock);
}
-static const struct it66121_chip_info it66121_chip_info = {
- .id = ID_IT66121,
- .vid = 0x4954,
- .pid = 0x0612,
-};
-
-static const struct it66121_chip_info it6610_chip_info = {
- .id = ID_IT6610,
- .vid = 0xca00,
- .pid = 0x0611,
-};
-
static const struct of_device_id it66121_dt_match[] = {
- { .compatible = "ite,it66121", &it66121_chip_info },
- { .compatible = "ite,it6610", &it6610_chip_info },
+ { .compatible = "ite,it6610" },
+ { .compatible = "ite,it66121" },
+ { .compatible = "ite,it66122" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
static const struct i2c_device_id it66121_id[] = {
- { "it66121", (kernel_ulong_t) &it66121_chip_info },
- { "it6610", (kernel_ulong_t) &it6610_chip_info },
+ { .name = "it6610" },
+ { .name = "it66121" },
+ { .name = "it66122" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);
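The it66121 hunks above drop the per-compatible match data and instead identify the part at probe time by reading its vendor/product ID registers and scanning one table. A minimal sketch of that lookup, with illustrative names only (the struct and helper below are not the driver's own):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct chip_ident {
            int id;         /* e.g. ID_IT6610, ID_IT66121, ID_IT66122 */
            u16 vid;
            u16 pid;
    };

    /* Scan the table for the IDs read back from the chip. */
    static int lookup_chip(const struct chip_ident *tbl, size_t n,
                           u16 vid, u16 pid)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (tbl[i].vid == vid && tbl[i].pid == pid)
                            return tbl[i].id;

            return -ENODEV;         /* no known IT66xx part found */
    }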
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..342374cb8fc6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -408,7 +408,7 @@ lt8912_connector_detect(struct drm_connector *connector, bool force)
struct lt8912 *lt = connector_to_lt8912(connector);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
@@ -606,12 +607,12 @@ lt8912_bridge_mode_valid(struct drm_bridge *bridge,
}
static enum drm_connector_status
-lt8912_bridge_detect(struct drm_bridge *bridge)
+lt8912_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -760,9 +761,10 @@ static int lt8912_probe(struct i2c_client *client)
int ret = 0;
struct device *dev = &client->dev;
- lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
- if (!lt)
- return -ENOMEM;
+ lt = devm_drm_bridge_alloc(dev, struct lt8912, bridge,
+ &lt8912_bridge_funcs);
+ if (IS_ERR(lt))
+ return PTR_ERR(lt);
lt->dev = dev;
lt->i2c_client[0] = client;
@@ -777,7 +779,6 @@ static int lt8912_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt);
- lt->bridge.funcs = &lt8912_bridge_funcs;
lt->bridge.of_node = dev->of_node;
lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_DETECT);
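Like the it66121 change, lt8912b now allocates its driver struct with devm_drm_bridge_alloc(), which wires up bridge.funcs at allocation time and reports failure as an ERR_PTR(). A sketch of the general pattern, assuming a driver struct that embeds struct drm_bridge (all foo_* names are illustrative):

    #include <linux/err.h>
    #include <drm/drm_bridge.h>

    struct foo {
            struct drm_bridge bridge;
            /* ... other driver state ... */
    };

    static const struct drm_bridge_funcs foo_bridge_funcs = {
            /* attach/detect/atomic_* callbacks go here */
    };

    static int foo_probe(struct device *dev)
    {
            struct foo *ctx;

            /* Allocates ctx and sets ctx->bridge.funcs in one step. */
            ctx = devm_drm_bridge_alloc(dev, struct foo, bridge,
                                        &foo_bridge_funcs);
            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            /* bridge.funcs no longer needs to be assigned by hand. */
            return 0;
    }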
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 999ddebb832d..03fc8fd10f20 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -120,8 +121,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)
}
/* Test for known Chip ID. */
- if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
- chipid[2] != REG_CHIPID2_VALUE) {
+ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
chipid[0], chipid[1], chipid[2]);
return -EINVAL;
@@ -455,10 +455,9 @@ static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
}
static void lt9211_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -553,7 +552,7 @@ static void lt9211_atomic_enable(struct drm_bridge *bridge,
}
static void lt9211_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
int ret;
@@ -727,9 +726,9 @@ static int lt9211_probe(struct i2c_client *client)
struct lt9211 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct lt9211, bridge, &lt9211_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -755,7 +754,6 @@ static int lt9211_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
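The lt9211 atomic hooks switch from receiving the old struct drm_bridge_state to the full struct drm_atomic_state. Where a hook still needs the per-bridge state, it can be looked up from the atomic state instead of the other way around; a sketch under that assumption (foo_* names are illustrative):

    #include <drm/drm_atomic.h>
    #include <drm/drm_bridge.h>

    static void foo_atomic_enable(struct drm_bridge *bridge,
                                  struct drm_atomic_state *state)
    {
            /* Replacement for the old old_bridge_state->base.state chain:
             * resolve the bridge state from the global atomic state. */
            struct drm_bridge_state *old_bridge_state =
                    drm_atomic_get_old_bridge_state(state, bridge);

            if (!old_bridge_state)
                    return;

            /* ... program the hardware using state / old_bridge_state ... */
    }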
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index e650cd83fc8d..a2d032ee4744 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -543,7 +543,8 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
@@ -640,12 +641,10 @@ lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
}
/* bridge funcs */
-static void
-lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -689,9 +688,8 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
regmap_write(lt9611->regmap, 0x8130, 0xea);
}
-static void
-lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
int ret;
@@ -743,11 +741,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -767,7 +766,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
static const struct reg_sequence reg_cfg[] = {
@@ -786,9 +785,8 @@ static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
lt9611->sleep = false;
}
-static void
-lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -939,8 +937,8 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
-static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -955,8 +953,8 @@ static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
return 0;
}
-static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -977,8 +975,8 @@ static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
&hparms->cea);
}
-static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -1075,9 +1073,10 @@ static int lt9611_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
- if (!lt9611)
- return -ENOMEM;
+ lt9611 = devm_drm_bridge_alloc(dev, struct lt9611, bridge,
+ &lt9611_bridge_funcs);
+ if (IS_ERR(lt9611))
+ return PTR_ERR(lt9611);
lt9611->dev = dev;
lt9611->client = client;
@@ -1130,11 +1129,10 @@ static int lt9611_probe(struct i2c_client *client)
/* Disable Audio InfoFrame, enabled by default */
regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
- lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..38fb8776c0f4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
@@ -352,7 +353,8 @@ static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge,
lt9611uxc_unlock(lt9611uxc);
}
-static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
@@ -774,9 +776,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
- if (!lt9611uxc)
- return -ENOMEM;
+ lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs);
+ if (IS_ERR(lt9611uxc))
+ return PTR_ERR(lt9611uxc);
lt9611uxc->dev = dev;
lt9611uxc->client = client;
@@ -855,7 +857,6 @@ retry:
i2c_set_clientdata(client, lt9611uxc);
- lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
lt9611uxc->bridge.of_node = client->dev.of_node;
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (lt9611uxc->hpd_supported)
@@ -880,7 +881,11 @@ retry:
}
}
- return lt9611uxc_audio_init(dev, lt9611uxc);
+ ret = lt9611uxc_audio_init(dev, lt9611uxc);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..e6a7147e141b 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
@@ -117,9 +118,10 @@ static int lvds_codec_probe(struct platform_device *pdev)
u32 val;
int ret;
- lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
- if (!lvds_codec)
- return -ENOMEM;
+ lvds_codec = devm_drm_bridge_alloc(dev, struct lvds_codec, bridge,
+ &funcs);
+ if (IS_ERR(lvds_codec))
+ return PTR_ERR(lvds_codec);
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
@@ -155,8 +157,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
- lvds_codec->bridge.funcs = &funcs;
-
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a3dcee62e7a5..c9e6505cbd88 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -115,19 +115,13 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
return num_modes;
}
-static enum drm_mode_status ge_b850v3_lvds_mode_valid(
- struct drm_connector *connector, struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static const struct
drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
- .mode_valid = ge_b850v3_lvds_mode_valid,
};
-static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct i2c_client *stdp4028_i2c =
ge_b850v3_lvds_ptr->stdp4028_i2c;
@@ -148,7 +142,7 @@ static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge
static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
bool force)
{
- return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
+ return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector);
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
@@ -197,6 +191,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
@@ -231,13 +226,11 @@ static int ge_b850v3_lvds_init(struct device *dev)
if (ge_b850v3_lvds_ptr)
goto success;
- ge_b850v3_lvds_ptr = devm_kzalloc(dev,
- sizeof(*ge_b850v3_lvds_ptr),
- GFP_KERNEL);
-
- if (!ge_b850v3_lvds_ptr) {
+ ge_b850v3_lvds_ptr = devm_drm_bridge_alloc(dev, struct ge_b850v3_lvds, bridge,
+ &ge_b850v3_lvds_funcs);
+ if (IS_ERR(ge_b850v3_lvds_ptr)) {
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
- return -ENOMEM;
+ return PTR_ERR(ge_b850v3_lvds_ptr);
}
success:
@@ -270,7 +263,6 @@ static int ge_b850v3_register(void)
struct device *dev = &stdp4028_i2c->dev;
/* drm bridge initialization */
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
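A recurring change in these hunks is that the .detect hook gains a struct drm_connector argument and drm_bridge_detect() forwards it, as lt8912b and ge-b850v3 show. A sketch of a bridge that simply delegates detection to the next bridge in the chain (illustrative names, not any driver's actual code):

    #include <linux/container_of.h>
    #include <drm/drm_bridge.h>
    #include <drm/drm_connector.h>

    struct foo {
            struct drm_bridge bridge;
            struct drm_bridge *next_bridge;
    };

    static inline struct foo *bridge_to_foo(struct drm_bridge *bridge)
    {
            return container_of(bridge, struct foo, bridge);
    }

    static enum drm_connector_status
    foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
    {
            struct foo *ctx = bridge_to_foo(bridge);

            /* Pass the connector through so the downstream bridge reports
             * status for the right connector. */
            if (ctx->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
                    return drm_bridge_detect(ctx->next_bridge, connector);

            return connector_status_unknown;
    }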
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index b8313dad6072..9f4ff82bc6b4 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
@@ -156,14 +157,14 @@ static int mchp_lvds_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct mchp_lvds, bridge,
+ &mchp_lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
- lvds->regs = devm_ioremap_resource(lvds->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
@@ -192,7 +193,6 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->bridge.of_node = dev->of_node;
lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
dev_set_drvdata(dev, lvds);
ret = devm_pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 1e5b2a37cb8c..2f7429b24fc2 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -736,9 +736,8 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}
-static void
-nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -898,9 +897,8 @@ runtime_put:
pm_runtime_put_sync(dev);
}
-static void
-nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -912,6 +910,7 @@ nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -921,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
@@ -1150,9 +1149,10 @@ static int nwl_dsi_probe(struct platform_device *pdev)
struct nwl_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct nwl_dsi, bridge,
+ &nwl_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
@@ -1181,9 +1181,9 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->quirks = (uintptr_t)attr->data;
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(dev, dsi);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 44e36ae66db4..7acb11f16dc1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -15,7 +15,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -215,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -240,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
@@ -261,10 +261,10 @@ static int ptn3460_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
- if (!ptn_bridge) {
- return -ENOMEM;
- }
+ ptn_bridge = devm_drm_bridge_alloc(dev, struct ptn3460_bridge, bridge,
+ &ptn3460_bridge_funcs);
+ if (IS_ERR(ptn_bridge))
+ return PTR_ERR(ptn_bridge);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -300,7 +300,6 @@ static int ptn3460_probe(struct i2c_client *client)
return ret;
}
- ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
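The other pattern repeated across these drivers is that .attach now receives the encoder explicitly instead of reading bridge->encoder, and that same pointer is used when chaining to the next bridge or attaching a connector. A sketch of the new shape, reusing the illustrative struct foo from the earlier detect sketch:

    #include <drm/drm_bridge.h>

    static int foo_bridge_attach(struct drm_bridge *bridge,
                                 struct drm_encoder *encoder,
                                 enum drm_bridge_attach_flags flags)
    {
            struct foo *ctx = bridge_to_foo(bridge);

            /* Chain up using the encoder passed in, not bridge->encoder. */
            return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
    }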
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6e88339dec0f..184a8b7049a7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -58,6 +59,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +83,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
@@ -109,10 +111,9 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
}
static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -129,10 +130,9 @@ static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -149,10 +149,9 @@ static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -169,10 +168,9 @@ static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -290,18 +288,18 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
if (!panel)
return ERR_PTR(-EINVAL);
- panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
- GFP_KERNEL);
- if (!panel_bridge)
- return ERR_PTR(-ENOMEM);
+ panel_bridge = devm_drm_bridge_alloc(panel->dev, struct panel_bridge, bridge,
+ &panel_bridge_bridge_funcs);
+ if (IS_ERR(panel_bridge))
+ return (void *)panel_bridge;
panel_bridge->connector_type = connector_type;
panel_bridge->panel = panel;
- panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
panel_bridge->bridge.of_node = panel->dev->of_node;
panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
panel_bridge->bridge.type = connector_type;
+ panel_bridge->bridge.pre_enable_prev_first = panel->prepare_prev_first;
drm_bridge_add(&panel_bridge->bridge);
@@ -322,13 +320,16 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
if (!bridge)
return;
- if (bridge->funcs != &panel_bridge_bridge_funcs)
+ if (!drm_bridge_is_panel(bridge)) {
+ drm_warn(bridge->dev, "%s: called on non-panel bridge!\n", __func__);
return;
+ }
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
- devm_kfree(panel_bridge->panel->dev, bridge);
+ /* TODO remove this after reworking panel_bridge lifetime */
+ devm_drm_put_bridge(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
@@ -414,8 +415,6 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return bridge;
}
- bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
*ptr = bridge;
devres_add(dev, ptr);
@@ -457,8 +456,6 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
- bridge->pre_enable_prev_first = panel->prepare_prev_first;
-
return bridge;
}
EXPORT_SYMBOL(drmm_panel_bridge_add);
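drm_panel_bridge_remove() now warns and bails out when handed a bridge that was not created by the panel-bridge helper, instead of silently returning. A reduced sketch of that guard, assuming the drm_bridge_is_panel() helper named in the hunk:

    #include <drm/drm_bridge.h>
    #include <drm/drm_panel.h>
    #include <drm/drm_print.h>

    static void foo_panel_bridge_teardown(struct drm_bridge *bridge)
    {
            if (!bridge)
                    return;

            /* Only bridges created by the panel-bridge helper may be torn
             * down through this path. */
            if (!drm_bridge_is_panel(bridge)) {
                    drm_warn(bridge->dev, "%s: not a panel bridge\n", __func__);
                    return;
            }

            drm_bridge_remove(bridge);
    }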
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index ae3ab9262ef1..f879a1df077d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -19,7 +19,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -419,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
@@ -449,9 +449,10 @@ static int ps8622_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
- if (!ps8622)
- return -ENOMEM;
+ ps8622 = devm_drm_bridge_alloc(dev, struct ps8622_bridge, bridge,
+ &ps8622_bridge_funcs);
+ if (IS_ERR(ps8622))
+ return PTR_ERR(ps8622);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -509,7 +510,6 @@ static int ps8622_probe(struct i2c_client *client)
ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
}
- ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 14d4dcf239da..825777a5758f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -20,7 +20,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#define PAGE0_AUXCH_CFG3 0x76
@@ -438,7 +437,7 @@ static const struct dev_pm_ops ps8640_pm_ops = {
};
static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -473,7 +472,7 @@ static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
}
static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -495,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -519,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
@@ -636,9 +636,10 @@ static int ps8640_probe(struct i2c_client *client)
int ret;
u32 i;
- ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
- if (!ps_bridge)
- return -ENOMEM;
+ ps_bridge = devm_drm_bridge_alloc(dev, struct ps8640, bridge,
+ &ps8640_bridge_funcs);
+ if (IS_ERR(ps_bridge))
+ return PTR_ERR(ps_bridge);
mutex_init(&ps_bridge->aux_lock);
@@ -662,7 +663,6 @@ static int ps8640_probe(struct i2c_client *client)
if (IS_ERR(ps_bridge->gpio_reset))
return PTR_ERR(ps_bridge->gpio_reset);
- ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index f8b4fb835765..eabc4c32f6ab 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -14,11 +14,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <video/mipi_display.h>
@@ -29,11 +31,10 @@
/* returns true iff both arguments logically differ */
#define NEQV(a, b) (!(a) ^ !(b))
-/* DSIM_STATUS */
+/* DSIM_STATUS or DSIM_DPHY_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
#define DSIM_STOP_STATE_CLK BIT(8)
#define DSIM_TX_READY_HS_CLK BIT(10)
-#define DSIM_PLL_STABLE BIT(31)
/* DSIM_SWRST */
#define DSIM_FUNCRST BIT(16)
@@ -44,17 +45,13 @@
#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN BIT(24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS BIT(27)
-#define DSIM_ESC_CLKEN BIT(28)
-#define DSIM_TX_REQUEST_HSCLK BIT(31)
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x, offset) (((x) & 0xf) << offset)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK(offset) (0xf << offset)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
/* DSIM_CONFIG */
#define DSIM_LANE_EN_CLK BIT(0)
@@ -89,7 +86,6 @@
*/
#define DSIM_HSE_DISABLE_MODE BIT(23)
#define DSIM_AUTO_MODE BIT(24)
-#define DSIM_VIDEO_MODE BIT(25)
#define DSIM_BURST_MODE BIT(26)
#define DSIM_SYNC_INFORM BIT(27)
#define DSIM_EOT_DISABLE BIT(28)
@@ -127,9 +123,9 @@
#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_VSA(x, offset) ((x) << offset)
#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_VSA_MASK(offset) ((0x3ff) << offset)
#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
/* DSIM_SDRESOL */
@@ -155,6 +151,11 @@
#define DSIM_INT_RX_ECC_ERR BIT(15)
#define DSIM_INT_RX_CRC_ERR BIT(14)
+/* DSIM_SFRCTRL */
+#define DSIM_SFR_CTRL_STAND_BY BIT(4)
+#define DSIM_SFR_CTRL_SHADOW_UPDATE BIT(1)
+#define DSIM_SFR_CTRL_SHADOW_EN BIT(0)
+
/* DSIM_FIFOCTRL */
#define DSIM_RX_DATA_FULL BIT(25)
#define DSIM_RX_DATA_EMPTY BIT(24)
@@ -189,9 +190,7 @@
#define DSIM_PLL_DPDNSWAP_DAT (1 << 24)
#define DSIM_FREQ_BAND(x) ((x) << 24)
#define DSIM_PLL_EN BIT(23)
-#define DSIM_PLL_P(x, offset) ((x) << (offset))
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
+#define DSIM_PLL(x, offset) ((x) << (offset))
/* DSIM_PHYCTRL */
#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
@@ -220,25 +219,42 @@
#define DSI_XFER_TIMEOUT_MS 100
#define DSI_RX_FIFO_EMPTY 0x30800002
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)
-static const char *const clk_names[5] = {
- "bus_clk",
- "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8",
- "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0"
-};
-
enum samsung_dsim_transfer_type {
EXYNOS_DSI_TX,
EXYNOS_DSI_RX,
};
+static struct clk_bulk_data exynos3_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "pll_clk" },
+};
+
+static struct clk_bulk_data exynos4_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+};
+
+static struct clk_bulk_data exynos5433_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+ { .id = "phyclk_mipidphy0_bitclkdiv8" },
+ { .id = "phyclk_mipidphy0_rxclkesc0" },
+ { .id = "sclk_rgb_vclk_to_dsim0" },
+};
+
+static struct clk_bulk_data exynos7870_clk_bulk_data[] = {
+ { .id = "bus" },
+ { .id = "pll" },
+ { .id = "byte" },
+ { .id = "esc" },
+};
+
enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
+ DSIM_STATUS_REG, /* Status register (legacy) */
+ DSIM_LINK_STATUS_REG, /* Link status register */
+ DSIM_DPHY_STATUS_REG, /* D-PHY status register */
DSIM_SWRST_REG, /* Software reset register */
DSIM_CLKCTRL_REG, /* Clock control register */
DSIM_TIMEOUT_REG, /* Time out register */
@@ -253,6 +269,7 @@ enum reg_idx {
DSIM_PKTHDR_REG, /* Packet Header FIFO register */
DSIM_PAYLOAD_REG, /* Payload FIFO register */
DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_SFRCTRL_REG, /* SFR standby and shadow control register */
DSIM_FIFOCTRL_REG, /* FIFO status and control register */
DSIM_PLLCTRL_REG, /* PLL control register */
DSIM_PHYCTRL_REG,
@@ -310,6 +327,32 @@ static const unsigned int exynos5433_reg_ofs[] = {
[DSIM_PHYTIMING2_REG] = 0xBC,
};
+static const unsigned int exynos7870_reg_ofs[] = {
+ [DSIM_LINK_STATUS_REG] = 0x04,
+ [DSIM_DPHY_STATUS_REG] = 0x08,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_CONFIG_REG] = 0x30,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_SFRCTRL_REG] = 0x48,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
enum reg_value_idx {
RESET_TYPE,
PLL_TIMER,
@@ -382,6 +425,24 @@ static const unsigned int exynos5433_reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
};
+static const unsigned int exynos7870_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 80000,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x177),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x08),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2b),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0f),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
static const unsigned int imx8mm_dsim_reg_values[] = {
[RESET_TYPE] = DSIM_SWRST,
[PLL_TIMER] = 500,
@@ -403,13 +464,26 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -422,13 +496,26 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -441,11 +528,24 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
- .num_clks = 2,
+ .has_legacy_status_reg = 1,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -457,12 +557,25 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 5,
+ .clk_data = exynos5433_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos5433_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5433_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -474,12 +587,25 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5422_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -488,19 +614,62 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.min_freq = 500,
};
+static const struct samsung_dsim_driver_data exynos7870_dsi_driver_data = {
+ .reg_ofs = exynos7870_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .has_sfrctrl = 1,
+ .clk_data = exynos7870_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos7870_clk_bulk_data),
+ .max_freq = 1500,
+ .wait_for_hdr_fifo = 0,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .video_mode_bit = 18,
+ .pll_stable_bit = 24,
+ .esc_clken_bit = 16,
+ .byte_clken_bit = 17,
+ .tx_req_hsclk_bit = 20,
+ .lane_esc_clk_bit = 8,
+ .lane_esc_data_offset = 9,
+ .pll_p_offset = 13,
+ .pll_m_offset = 3,
+ .pll_s_offset = 0,
+ .main_vsa_offset = 16,
+ .reg_values = exynos7870_reg_values,
+ .pll_fin_min = 6,
+ .pll_fin_max = 12,
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
+};
+
static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 2100,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
/*
* Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
* downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
*/
.pll_p_offset = 14,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = imx8mm_dsim_reg_values,
.pll_fin_min = 2,
.pll_fin_max = 30,
@@ -516,6 +685,7 @@ samsung_dsim_types[DSIM_TYPE_COUNT] = {
[DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
[DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
[DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS7870] = &exynos7870_dsi_driver_data,
[DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
[DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
};
@@ -557,10 +727,6 @@ static void samsung_dsim_reset(struct samsung_dsim *dsi)
samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
}
-#ifndef MHZ
-#define MHZ (1000 * 1000)
-#endif
-
static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
unsigned long fin,
unsigned long fout,
@@ -574,8 +740,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
u16 _m, best_m;
u8 _s, best_s;
- p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ));
- p_max = fin / (driver_data->pll_fin_min * MHZ);
+ p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * HZ_PER_MHZ));
+ p_max = fin / (driver_data->pll_fin_min * HZ_PER_MHZ);
for (_p = p_min; _p <= p_max; ++_p) {
for (_s = 0; _s <= 5; ++_s) {
@@ -590,8 +756,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
tmp = (u64)_m * fin;
do_div(tmp, _p);
- if (tmp < driver_data->min_freq * MHZ ||
- tmp > driver_data->max_freq * MHZ)
+ if (tmp < driver_data->min_freq * HZ_PER_MHZ ||
+ tmp > driver_data->max_freq * HZ_PER_MHZ)
continue;
tmp = (u64)_m * fin;
@@ -634,7 +800,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
* limit.
*/
fin = clk_get_rate(clk_get_parent(dsi->pll_clk));
- while (fin > driver_data->pll_fin_max * MHZ)
+ while (fin > driver_data->pll_fin_max * HZ_PER_MHZ)
fin /= 2;
clk_set_rate(dsi->pll_clk, fin);
@@ -655,15 +821,17 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
writel(driver_data->reg_values[PLL_TIMER],
dsi->reg_base + driver_data->plltmr_reg);
- reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
- DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ reg = DSIM_PLL_EN | DSIM_PLL(p, driver_data->pll_p_offset)
+ | DSIM_PLL(m, driver_data->pll_m_offset)
+ | DSIM_PLL(s, driver_data->pll_s_offset);
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ 100 * HZ_PER_MHZ, 120 * HZ_PER_MHZ, 160 * HZ_PER_MHZ,
+ 200 * HZ_PER_MHZ, 270 * HZ_PER_MHZ, 320 * HZ_PER_MHZ,
+ 390 * HZ_PER_MHZ, 450 * HZ_PER_MHZ, 510 * HZ_PER_MHZ,
+ 560 * HZ_PER_MHZ, 640 * HZ_PER_MHZ, 690 * HZ_PER_MHZ,
+ 770 * HZ_PER_MHZ, 870 * HZ_PER_MHZ, 950 * HZ_PER_MHZ,
};
int band;
@@ -683,14 +851,17 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
- timeout = 1000;
+ timeout = 3000;
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return 0;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_LINK_STATUS_REG);
+ } while ((reg & BIT(driver_data->pll_stable_bit)) == 0);
dsi->hs_clock = fout;
@@ -699,6 +870,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
unsigned long esc_div;
u32 reg;
@@ -723,7 +895,7 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
esc_clk = byte_clk / esc_div;
- if (esc_clk > 20 * MHZ) {
+ if (esc_clk > 20 * HZ_PER_MHZ) {
++esc_div;
esc_clk = byte_clk / esc_div;
}
@@ -732,15 +904,17 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
hs_clk, byte_clk, esc_clk);
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= BIT(driver_data->esc_clken_bit) | BIT(driver_data->byte_clken_bit)
+ | DSIM_ESC_PRESCALER(esc_div)
+ | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1,
+ driver_data->lane_esc_data_offset)
+ | DSIM_BYTE_CLK_SRC(0)
+ | BIT(driver_data->tx_req_hsclk_bit);
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
return 0;
@@ -844,11 +1018,14 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ reg &= ~(BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | BIT(driver_data->esc_clken_bit)
+ | BIT(driver_data->byte_clken_bit));
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
@@ -892,14 +1069,12 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* mode, otherwise it will support command mode.
*/
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
+ reg |= BIT(driver_data->video_mode_bit);
/*
* The user manual describes that the following bits are ignored in
* command mode.
*/
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -965,7 +1140,10 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
return -EFAULT;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_DPHY_STATUS_REG);
if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
!= DSIM_STOP_STATE_DAT(lanes_mask))
continue;
@@ -986,6 +1164,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
{
struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ unsigned int main_vsa_offset = dsi->driver_data->main_vsa_offset;
u32 reg;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
@@ -1012,7 +1191,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start, main_vsa_offset)
| DSIM_MAIN_HSA(hsa);
samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
}
@@ -1026,6 +1205,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
@@ -1034,6 +1214,15 @@ static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enabl
else
reg &= ~DSIM_MAIN_STAND_BY;
samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ if (driver_data->has_sfrctrl) {
+ reg = samsung_dsim_read(dsi, DSIM_SFRCTRL_REG);
+ if (enable)
+ reg |= DSIM_SFR_CTRL_STAND_BY;
+ else
+ reg &= ~DSIM_SFR_CTRL_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_SFRCTRL_REG, reg);
+ }
}
static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
@@ -1090,12 +1279,13 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
{
struct device *dev = dsi->dev;
struct mipi_dsi_packet *pkt = &xfer->packet;
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
const u8 *payload = pkt->payload + xfer->tx_done;
u16 length = pkt->payload_length - xfer->tx_done;
bool first = !xfer->tx_done;
u32 reg;
- dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+ dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
if (length > DSI_TX_FIFO_SIZE)
@@ -1130,9 +1320,11 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
return;
reg = get_unaligned_le32(pkt->header);
- if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
+ if (driver_data->wait_for_hdr_fifo) {
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
}
if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
@@ -1235,43 +1427,34 @@ static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
{
unsigned long flags;
struct samsung_dsim_transfer *xfer;
- bool start = false;
-again:
spin_lock_irqsave(&dsi->transfer_lock, flags);
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct samsung_dsim_transfer, list);
+ while (!list_empty(&dsi->transfer_list)) {
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
- samsung_dsim_send_to_fifo(dsi, xfer);
+ samsung_dsim_send_to_fifo(dsi, xfer);
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
- xfer->result = 0;
- complete(&xfer->completed);
+ xfer->result = 0;
+ complete(&xfer->completed);
- spin_lock_irqsave(&dsi->transfer_lock, flags);
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
+ list_del_init(&xfer->list);
+ }
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
}
static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
@@ -1293,7 +1476,7 @@ static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
dev_dbg(dsi->dev,
- "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
xfer->rx_done);
@@ -1457,7 +1640,7 @@ static int samsung_dsim_init(struct samsung_dsim *dsi)
}
static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
int ret;
@@ -1485,7 +1668,7 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1496,7 +1679,7 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1508,7 +1691,7 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1640,11 +1823,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1933,11 +2117,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_dsim *dsi;
- int ret, i;
+ int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -1957,23 +2141,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
- sizeof(*dsi->clks), GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
+ ret = devm_clk_bulk_get(dev, dsi->driver_data->num_clks,
+ dsi->driver_data->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to get clocks in bulk (%d)\n", ret);
+ return ret;
}
dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
@@ -2007,7 +2179,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
@@ -2047,7 +2218,7 @@ static int samsung_dsim_suspend(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
usleep_range(10000, 20000);
@@ -2063,8 +2234,7 @@ static int samsung_dsim_suspend(struct device *dev)
phy_power_off(dsi->phy);
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0)
@@ -2077,7 +2247,7 @@ static int samsung_dsim_resume(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0) {
@@ -2085,11 +2255,9 @@ static int samsung_dsim_resume(struct device *dev)
return ret;
}
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+ ret = clk_bulk_prepare_enable(driver_data->num_clks, driver_data->clk_data);
+ if (ret < 0)
+ goto err_clk;
ret = phy_power_on(dsi->phy);
if (ret < 0) {
@@ -2100,8 +2268,7 @@ static int samsung_dsim_resume(struct device *dev)
return 0;
err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
return ret;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index bf2d1632b020..1f0aba28ad1e 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -179,7 +179,6 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
- bool sink_is_hdmi;
u32 bus_width;
/*
@@ -315,8 +314,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
drm_edid_free(drm_edid);
}
- sii902x->sink_is_hdmi = connector->display_info.is_hdmi;
-
return num;
}
@@ -325,7 +322,7 @@ static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs =
};
static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -339,12 +336,20 @@ static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct drm_connector *connector;
+ u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (connector && connector->display_info.is_hdmi)
+ output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
mutex_lock(&sii902x->mutex);
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
SII902X_AVI_POWER_STATE_MSK,
SII902X_AVI_POWER_STATE_D(0));
@@ -359,16 +364,12 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
- u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
- if (sii902x->sink_is_hdmi)
- output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
-
buf[0] = pixel_clock_10kHz & 0xff;
buf[1] = pixel_clock_10kHz >> 8;
buf[2] = drm_mode_vrefresh(adj);
@@ -384,11 +385,6 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&sii902x->mutex);
- ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
- if (ret)
- goto out;
-
ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
if (ret)
goto out;
@@ -416,6 +412,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +421,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,12 +449,13 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
-static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -887,7 +885,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
lanes[0] = 0;
} else if (num_lanes < 0) {
dev_err(dev,
- "%s: Error gettin \"sil,i2s-data-lanes\": %d\n",
+ "%s: Error getting \"sil,i2s-data-lanes\": %d\n",
__func__, num_lanes);
return num_lanes;
}
@@ -1134,10 +1132,10 @@ static int sii902x_init(struct sii902x *sii902x)
if (ret)
goto err_unreg_audio;
- sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
@@ -1168,9 +1166,9 @@ static int sii902x_probe(struct i2c_client *client)
return -EIO;
}
- sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
- if (!sii902x)
- return -ENOMEM;
+ sii902x = devm_drm_bridge_alloc(dev, struct sii902x, bridge, &sii902x_bridge_funcs);
+ if (IS_ERR(sii902x))
+ return PTR_ERR(sii902x);
sii902x->i2c = client;
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index cd7837c9a6e0..bb1bed03eb5b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -888,9 +888,10 @@ static int sii9234_probe(struct i2c_client *client)
struct device *dev = &client->dev;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii9234, bridge,
+ &sii9234_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -921,7 +922,6 @@ static int sii9234_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii9234_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..9e48ad39e1cc 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
@@ -2290,9 +2291,10 @@ static int sii8620_probe(struct i2c_client *client)
struct sii8620 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii8620, bridge,
+ &sii8620_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -2335,7 +2337,6 @@ static int sii8620_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..2cd1847ba776 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -90,7 +90,7 @@ simple_bridge_connector_detect(struct drm_connector *connector, bool force)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- return drm_bridge_detect(sbridge->next_bridge);
+ return drm_bridge_detect(sbridge->next_bridge, connector);
}
static const struct drm_connector_funcs simple_bridge_con_funcs = {
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
@@ -167,9 +168,10 @@ static int simple_bridge_probe(struct platform_device *pdev)
struct simple_bridge *sbridge;
struct device_node *remote;
- sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
- if (!sbridge)
- return -ENOMEM;
+ sbridge = devm_drm_bridge_alloc(&pdev->dev, struct simple_bridge,
+ bridge, &simple_bridge_bridge_funcs);
+ if (IS_ERR(sbridge))
+ return PTR_ERR(sbridge);
sbridge->info = of_device_get_match_data(&pdev->dev);
@@ -203,7 +205,6 @@ static int simple_bridge_probe(struct platform_device *pdev)
"Unable to retrieve enable GPIO\n");
/* Register the bridge. */
- sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
sbridge->bridge.of_node = pdev->dev.of_node;
sbridge->bridge.timings = sbridge->info->timings;
@@ -261,6 +262,26 @@ static const struct of_device_id simple_bridge_match[] = {
.connector_type = DRM_MODE_CONNECTOR_VGA,
},
}, {
+ .compatible = "asl-tek,cs5263",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "parade,ps185hdm",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "radxa,ra620",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "realtek,rtd2171",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
.compatible = "ti,opa362",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_Composite,
diff --git a/drivers/gpu/drm/bridge/ssd2825.c b/drivers/gpu/drm/bridge/ssd2825.c
new file mode 100644
index 000000000000..f2fdbf7c117d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ssd2825.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#define SSD2825_DEVICE_ID_REG 0xb0
+#define SSD2825_RGB_INTERFACE_CTRL_REG_1 0xb1
+#define SSD2825_RGB_INTERFACE_CTRL_REG_2 0xb2
+#define SSD2825_RGB_INTERFACE_CTRL_REG_3 0xb3
+#define SSD2825_RGB_INTERFACE_CTRL_REG_4 0xb4
+#define SSD2825_RGB_INTERFACE_CTRL_REG_5 0xb5
+#define SSD2825_RGB_INTERFACE_CTRL_REG_6 0xb6
+#define SSD2825_NON_BURST_EV BIT(2)
+#define SSD2825_BURST BIT(3)
+#define SSD2825_PCKL_HIGH BIT(13)
+#define SSD2825_HSYNC_HIGH BIT(14)
+#define SSD2825_VSYNC_HIGH BIT(15)
+#define SSD2825_CONFIGURATION_REG 0xb7
+#define SSD2825_CONF_REG_HS BIT(0)
+#define SSD2825_CONF_REG_CKE BIT(1)
+#define SSD2825_CONF_REG_SLP BIT(2)
+#define SSD2825_CONF_REG_VEN BIT(3)
+#define SSD2825_CONF_REG_HCLK BIT(4)
+#define SSD2825_CONF_REG_CSS BIT(5)
+#define SSD2825_CONF_REG_DCS BIT(6)
+#define SSD2825_CONF_REG_REN BIT(7)
+#define SSD2825_CONF_REG_ECD BIT(8)
+#define SSD2825_CONF_REG_EOT BIT(9)
+#define SSD2825_CONF_REG_LPE BIT(10)
+#define SSD2825_VC_CTRL_REG 0xb8
+#define SSD2825_PLL_CTRL_REG 0xb9
+#define SSD2825_PLL_CONFIGURATION_REG 0xba
+#define SSD2825_CLOCK_CTRL_REG 0xbb
+#define SSD2825_PACKET_SIZE_CTRL_REG_1 0xbc
+#define SSD2825_PACKET_SIZE_CTRL_REG_2 0xbd
+#define SSD2825_PACKET_SIZE_CTRL_REG_3 0xbe
+#define SSD2825_PACKET_DROP_REG 0xbf
+#define SSD2825_OPERATION_CTRL_REG 0xc0
+#define SSD2825_MAX_RETURN_SIZE_REG 0xc1
+#define SSD2825_RETURN_DATA_COUNT_REG 0xc2
+#define SSD2825_ACK_RESPONSE_REG 0xc3
+#define SSD2825_LINE_CTRL_REG 0xc4
+#define SSD2825_INTERRUPT_CTRL_REG 0xc5
+#define SSD2825_INTERRUPT_STATUS_REG 0xc6
+#define SSD2825_ERROR_STATUS_REG 0xc7
+#define SSD2825_DATA_FORMAT_REG 0xc8
+#define SSD2825_DELAY_ADJ_REG_1 0xc9
+#define SSD2825_DELAY_ADJ_REG_2 0xca
+#define SSD2825_DELAY_ADJ_REG_3 0xcb
+#define SSD2825_DELAY_ADJ_REG_4 0xcc
+#define SSD2825_DELAY_ADJ_REG_5 0xcd
+#define SSD2825_DELAY_ADJ_REG_6 0xce
+#define SSD2825_HS_TX_TIMER_REG_1 0xcf
+#define SSD2825_HS_TX_TIMER_REG_2 0xd0
+#define SSD2825_LP_RX_TIMER_REG_1 0xd1
+#define SSD2825_LP_RX_TIMER_REG_2 0xd2
+#define SSD2825_TE_STATUS_REG 0xd3
+#define SSD2825_SPI_READ_REG 0xd4
+#define SSD2825_SPI_READ_REG_RESET 0xfa
+#define SSD2825_PLL_LOCK_REG 0xd5
+#define SSD2825_TEST_REG 0xd6
+#define SSD2825_TE_COUNT_REG 0xd7
+#define SSD2825_ANALOG_CTRL_REG_1 0xd8
+#define SSD2825_ANALOG_CTRL_REG_2 0xd9
+#define SSD2825_ANALOG_CTRL_REG_3 0xda
+#define SSD2825_ANALOG_CTRL_REG_4 0xdb
+#define SSD2825_INTERRUPT_OUT_CTRL_REG 0xdc
+#define SSD2825_RGB_INTERFACE_CTRL_REG_7 0xdd
+#define SSD2825_LANE_CONFIGURATION_REG 0xde
+#define SSD2825_DELAY_ADJ_REG_7 0xdf
+#define SSD2825_INPUT_PIN_CTRL_REG_1 0xe0
+#define SSD2825_INPUT_PIN_CTRL_REG_2 0xe1
+#define SSD2825_BIDIR_PIN_CTRL_REG_1 0xe2
+#define SSD2825_BIDIR_PIN_CTRL_REG_2 0xe3
+#define SSD2825_BIDIR_PIN_CTRL_REG_3 0xe4
+#define SSD2825_BIDIR_PIN_CTRL_REG_4 0xe5
+#define SSD2825_BIDIR_PIN_CTRL_REG_5 0xe6
+#define SSD2825_BIDIR_PIN_CTRL_REG_6 0xe7
+#define SSD2825_BIDIR_PIN_CTRL_REG_7 0xe8
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_1 0xe9
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_2 0xea
+#define SSD2825_CABC_BRIGHTNESS_STATUS_REG 0xeb
+#define SSD2825_READ_REG 0xff
+
+#define SSD2825_COM_BYTE 0x00
+#define SSD2825_DAT_BYTE 0x01
+
+#define SSD2828_LP_CLOCK_DIVIDER(n) (((n) - 1) & 0x3f)
+#define SSD2825_LP_MIN_CLK 5000 /* kHz */
+#define SSD2825_REF_MIN_CLK 2000 /* kHz */
+
+static const struct regulator_bulk_data ssd2825_supplies[] = {
+ { .supply = "dvdd" },
+ { .supply = "avdd" },
+ { .supply = "vddio" },
+};
+
+struct ssd2825_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct ssd2825_priv {
+ struct spi_device *spi;
+ struct device *dev;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ struct clk *tx_clk;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct ssd2825_dsi_output output;
+
+ struct mutex mlock; /* for host transfer operations */
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 pll_freq_kbps; /* PLL in kbps */
+ u32 nibble_freq_khz; /* PLL div by 4 */
+
+ u32 hzd; /* HS Zero Delay in ns */
+ u32 hpd; /* HS Prepare Delay in ns */
+};
+
+static inline struct ssd2825_priv *dsi_host_to_ssd2825(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct ssd2825_priv, dsi_host);
+}
+
+static inline struct ssd2825_priv *bridge_to_ssd2825(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ssd2825_priv, bridge);
+}
+
+static int ssd2825_write_raw(struct ssd2825_priv *priv, u8 high_byte, u8 low_byte)
+{
+ struct spi_device *spi = priv->spi;
+ u8 tx_buf[2];
+
+ /*
+ * Low byte is the value, high byte defines type of
+ * write cycle, 0 for command and 1 for data.
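+ *
+ * As an illustration (values chosen for the example, not mandated by the
+ * datasheet): a call such as ssd2825_write_reg(priv,
+ * SSD2825_OPERATION_CTRL_REG, 0x0100) expands into three of these cycles:
+ * a command cycle carrying 0xc0, followed by data cycles carrying 0x00
+ * (bits 7-0) and 0x01 (bits 15-8).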
+ */
+ tx_buf[0] = low_byte;
+ tx_buf[1] = high_byte;
+
+ return spi_write(spi, tx_buf, 2);
+}
+
+static int ssd2825_write_reg(struct ssd2825_priv *priv, u8 reg, u16 command)
+{
+ u8 datal = (command & 0x00FF);
+ u8 datah = (command & 0xFF00) >> 8;
+ int ret;
+
+ /* Command write cycle */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 7-0 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datal);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 15-8 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datah);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_write_dsi(struct ssd2825_priv *priv, const u8 *command, int len)
+{
+ int ret, i;
+
+ ret = ssd2825_write_reg(priv, SSD2825_PACKET_SIZE_CTRL_REG_1, len);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, SSD2825_PACKET_DROP_REG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++) {
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, command[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ssd2825_read_raw(struct ssd2825_priv *priv, u8 cmd, u16 *data)
+{
+ struct spi_device *spi = priv->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+ int ret;
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ tx_buf[1] = (cmd & 0xFF00) >> 8;
+ tx_buf[0] = (cmd & 0x00FF);
+
+ xfer[0].tx_buf = tx_buf;
+ xfer[0].bits_per_word = 9;
+ xfer[0].len = 2;
+
+ xfer[1].rx_buf = rx_buf;
+ xfer[1].bits_per_word = 16;
+ xfer[1].len = 2;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+ spi_message_add_tail(&xfer[1], &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "ssd2825 read raw failed %d\n", ret);
+ return ret;
+ }
+
+ *data = rx_buf[1] | (rx_buf[0] << 8);
+
+ return 0;
+}
+
+static int ssd2825_read_reg(struct ssd2825_priv *priv, u8 reg, u16 *data)
+{
+ int ret;
+
+ /* Reset the read register */
+ ret = ssd2825_write_reg(priv, SSD2825_SPI_READ_REG, SSD2825_SPI_READ_REG_RESET);
+ if (ret)
+ return ret;
+
+ /* Push the address to read */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Perform a reading cycle */
+ ret = ssd2825_read_raw(priv, SSD2825_SPI_READ_REG_RESET, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n", dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * The SSD2825 supports both Video and Pulse mode, but the driver
+ * currently implements only Video (event) mode.
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t ssd2825_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ u16 config;
+ int ret;
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ guard(mutex)(&priv->mlock);
+
+ ret = ssd2825_read_reg(priv, SSD2825_CONFIGURATION_REG, &config);
+ if (ret)
+ return ret;
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ config |= SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ config &= ~SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ default:
+ return 0;
+ }
+
+ ret = ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_dsi(priv, msg->tx_buf, msg->tx_len);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops ssd2825_dsi_host_ops = {
+ .attach = ssd2825_dsi_host_attach,
+ .detach = ssd2825_dsi_host_detach,
+ .transfer = ssd2825_dsi_host_transfer,
+};
+
+static void ssd2825_hw_reset(struct ssd2825_priv *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+/*
+ * PLL configuration register settings.
+ *
+ * See the "PLL Configuration Register Description" in the SSD2825 datasheet.
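+ *
+ * Illustrative example (assumed numbers, not taken from the datasheet):
+ * with a 24000 kHz reference clock and a requested PLL rate of 400000
+ * kbps, div_factor settles at 12 (24000 / 12 = 2000 kHz, the minimum
+ * reference), mul_factor = DIV_ROUND_UP(400000 * 12, 24000) = 200, the
+ * PLL then runs at exactly 400000 kbps with a 100000 kHz nibble clock,
+ * and the returned word is (2 << 14) | (12 << 8) | 200 = 0x8cc8.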
+ */
+static u16 construct_pll_config(struct ssd2825_priv *priv,
+ u32 desired_pll_freq_kbps, u32 reference_freq_khz)
+{
+ u32 div_factor = 1, mul_factor, fr = 0;
+
+ while (reference_freq_khz / (div_factor + 1) >= SSD2825_REF_MIN_CLK)
+ div_factor++;
+ if (div_factor > 31)
+ div_factor = 31;
+
+ mul_factor = DIV_ROUND_UP(desired_pll_freq_kbps * div_factor,
+ reference_freq_khz);
+
+ priv->pll_freq_kbps = reference_freq_khz * mul_factor / div_factor;
+ priv->nibble_freq_khz = priv->pll_freq_kbps / 4;
+
+ if (priv->pll_freq_kbps >= 501000)
+ fr = 3;
+ else if (priv->pll_freq_kbps >= 251000)
+ fr = 2;
+ else if (priv->pll_freq_kbps >= 126000)
+ fr = 1;
+
+ return (fr << 14) | (div_factor << 8) | mul_factor;
+}
+
+static int ssd2825_setup_pll(struct ssd2825_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u16 pll_config, lp_div;
+ u32 nibble_delay, pclk_mult, tx_freq_khz;
+ u8 hzd, hpd;
+
+ tx_freq_khz = clk_get_rate(priv->tx_clk) / KILO;
+ if (!tx_freq_khz)
+ tx_freq_khz = SSD2825_REF_MIN_CLK;
+
+ pclk_mult = priv->pd_lines / priv->dsi_lanes + 1;
+ pll_config = construct_pll_config(priv, pclk_mult * mode->clock,
+ tx_freq_khz);
+
+ lp_div = priv->pll_freq_kbps / (SSD2825_LP_MIN_CLK * 8);
+
+ /* nibble_delay in nanoseconds */
+ nibble_delay = MICRO / priv->nibble_freq_khz;
+
+ hzd = priv->hzd / nibble_delay;
+ hpd = (priv->hpd - 4 * nibble_delay) / nibble_delay;
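+
+ /*
+ * With the illustrative figures from the construct_pll_config() example
+ * (assumed, not from the datasheet): a 100000 kHz nibble clock gives a
+ * 10 ns nibble_delay, the default hzd of 133 ns becomes 13 nibble
+ * periods, the default hpd of 40 ns becomes (40 - 4 * 10) / 10 = 0, and
+ * lp_div works out to 400000 / (5000 * 8) = 10.
+ */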
+
+ /* Disable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0000);
+ ssd2825_write_reg(priv, SSD2825_LINE_CTRL_REG, 0x0001);
+
+ /* Set delays */
+ ssd2825_write_reg(priv, SSD2825_DELAY_ADJ_REG_1, (hzd << 8) | hpd);
+
+ /* Set PLL coefficients */
+ ssd2825_write_reg(priv, SSD2825_PLL_CONFIGURATION_REG, pll_config);
+
+ /* Clock Control Register */
+ ssd2825_write_reg(priv, SSD2825_CLOCK_CTRL_REG,
+ SSD2828_LP_CLOCK_DIVIDER(lp_div));
+
+ /* Enable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ return 0;
+}
+
+static void ssd2825_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ u32 input_bus_flags = bridge->timings->input_bus_flags;
+ u16 flags = 0, config;
+ u8 pixel_format;
+ int ret;
+
+ /* Power Sequence */
+ ret = clk_prepare_enable(priv->tx_clk);
+ if (ret)
+ dev_err(priv->dev, "error enabling tx_clk (%d)\n", ret);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ssd2825_supplies), priv->supplies);
+ if (ret)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ usleep_range(1000, 2000);
+
+ ssd2825_hw_reset(priv);
+
+ /* Perform SW reset */
+ ssd2825_write_reg(priv, SSD2825_OPERATION_CTRL_REG, 0x0100);
+
+ /* Set pixel format */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB565:
+ pixel_format = 0x00;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ pixel_format = 0x01;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ pixel_format = 0x02;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ pixel_format = 0x03;
+ break;
+ }
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ /* Set panel timings */
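+ /*
+ * In DRM terms the writes below program, with the vertical value in the
+ * high byte and the horizontal one in the low byte: the back porch
+ * (total - sync_end), the back porch plus sync width (total -
+ * sync_start) and the front porch (sync_start - display), followed by
+ * the active width and height.
+ */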
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_1,
+ ((mode->vtotal - mode->vsync_end) << 8) |
+ (mode->htotal - mode->hsync_end));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_2,
+ ((mode->vtotal - mode->vsync_start) << 8) |
+ (mode->htotal - mode->hsync_start));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_3,
+ ((mode->vsync_start - mode->vdisplay) << 8) |
+ (mode->hsync_start - mode->hdisplay));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_4, mode->hdisplay);
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_5, mode->vdisplay);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ flags |= SSD2825_HSYNC_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ flags |= SSD2825_VSYNC_HIGH;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ flags |= SSD2825_NON_BURST_EV;
+
+ if (input_bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE)
+ flags |= SSD2825_PCKL_HIGH;
+
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_6, flags | pixel_format);
+ ssd2825_write_reg(priv, SSD2825_LANE_CONFIGURATION_REG, dsi_dev->lanes - 1);
+ ssd2825_write_reg(priv, SSD2825_TEST_REG, 0x0004);
+
+ /* Call PLL configuration */
+ ssd2825_setup_pll(priv, mode);
+
+ usleep_range(10000, 11000);
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_CKE | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)
+ config &= ~SSD2825_CONF_REG_HS;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Initial DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ if (priv->output.panel)
+ drm_panel_enable(priv->output.panel);
+}
+
+static void ssd2825_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u16 config;
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ config |= SSD2825_CONF_REG_VEN;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Complete configuration after DSI commands were sent */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+}
+
+static void ssd2825_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ int ret;
+
+ msleep(100);
+
+ /* Exit DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG,
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ /* HW disable */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ssd2825_supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ clk_disable_unprepare(priv->tx_clk);
+}
+
+static int ssd2825_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+ssd2825_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay > 1366)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > 1366)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static bool ssd2825_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Default to positive sync */
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+
+ return true;
+}
+
+static const struct drm_bridge_funcs ssd2825_bridge_funcs = {
+ .attach = ssd2825_bridge_attach,
+ .mode_valid = ssd2825_bridge_mode_valid,
+ .mode_fixup = ssd2825_mode_fixup,
+
+ .atomic_pre_enable = ssd2825_bridge_atomic_pre_enable,
+ .atomic_enable = ssd2825_bridge_atomic_enable,
+ .atomic_disable = ssd2825_bridge_atomic_disable,
+
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+};
+
+static const struct drm_bridge_timings default_ssd2825_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static int ssd2825_probe(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv;
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /* Driver supports only 8 bit 3 Wire mode */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ priv = devm_drm_bridge_alloc(dev, struct ssd2825_priv, bridge, &ssd2825_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ spi_set_drvdata(spi, priv);
+
+ priv->spi = spi;
+ priv->dev = dev;
+
+ mutex_init(&priv->mlock);
+
+ priv->tx_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->tx_clk),
+ "can't retrieve bridge tx_clk\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(ssd2825_supplies),
+ ssd2825_supplies, &priv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ priv->hzd = 133; /* ns */
+ device_property_read_u32(dev, "solomon,hs-zero-delay-ns", &priv->hzd);
+
+ priv->hpd = 40; /* ns */
+ device_property_read_u32(dev, "solomon,hs-prep-delay-ns", &priv->hpd);
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &ssd2825_dsi_host_ops;
+
+ priv->bridge.timings = &default_ssd2825_timings;
+ priv->bridge.of_node = np;
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static void ssd2825_remove(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv = spi_get_drvdata(spi);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+}
+
+static const struct of_device_id ssd2825_of_match[] = {
+ { .compatible = "solomon,ssd2825" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd2825_of_match);
+
+static struct spi_driver ssd2825_driver = {
+ .driver = {
+ .name = "ssd2825",
+ .of_match_table = ssd2825_of_match,
+ },
+ .probe = ssd2825_probe,
+ .remove = ssd2825_remove,
+};
+module_spi_driver(ssd2825_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Solomon SSD2825 RGB to MIPI-DSI bridge driver SPI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index f3ab2f985f8c..a46df7583bcf 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_DW_DP
+ tristate
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_MMIO
+
config DRM_DW_HDMI
tristate
select DRM_DISPLAY_HDMI_HELPER
@@ -54,6 +61,14 @@ config DRM_DW_HDMI_QP
select DRM_KMS_HELPER
select REGMAP_MMIO
+config DRM_DW_HDMI_QP_CEC
+ bool "Synopsis Designware QP CEC interface"
+ depends on DRM_DW_HDMI_QP
+ select DRM_DISPLAY_HDMI_CEC_HELPER
+ help
+ Support the CEC interface which is part of the Synopsys
+ DesignWare HDMI QP block.
+
config DRM_DW_MIPI_DSI
tristate
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9dc376d220ad..4dada44029ac 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_DW_DP) += dw-dp.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
new file mode 100644
index 000000000000..82aaf74e1bc0
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
@@ -0,0 +1,2097 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare Cores DisplayPort Transmitter Controller
+ *
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/unaligned.h>
+
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DW_DP_VERSION_NUMBER 0x0000
+#define DW_DP_VERSION_TYPE 0x0004
+#define DW_DP_ID 0x0008
+
+#define DW_DP_CONFIG_REG1 0x0100
+#define DW_DP_CONFIG_REG2 0x0104
+#define DW_DP_CONFIG_REG3 0x0108
+
+#define DW_DP_CCTL 0x0200
+#define FORCE_HPD BIT(4)
+#define DEFAULT_FAST_LINK_TRAIN_EN BIT(2)
+#define ENHANCE_FRAMING_EN BIT(1)
+#define SCRAMBLE_DIS BIT(0)
+#define DW_DP_SOFT_RESET_CTRL 0x0204
+#define VIDEO_RESET BIT(5)
+#define AUX_RESET BIT(4)
+#define AUDIO_SAMPLER_RESET BIT(3)
+#define HDCP_MODULE_RESET BIT(2)
+#define PHY_SOFT_RESET BIT(1)
+#define CONTROLLER_RESET BIT(0)
+
+#define DW_DP_VSAMPLE_CTRL 0x0300
+#define PIXEL_MODE_SELECT GENMASK(22, 21)
+#define VIDEO_MAPPING GENMASK(20, 16)
+#define VIDEO_STREAM_ENABLE BIT(5)
+
+#define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304
+
+#define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308
+
+#define DW_DP_VINPUT_POLARITY_CTRL 0x030c
+#define DE_IN_POLARITY BIT(2)
+#define HSYNC_IN_POLARITY BIT(1)
+#define VSYNC_IN_POLARITY BIT(0)
+
+#define DW_DP_VIDEO_CONFIG1 0x0310
+#define HACTIVE GENMASK(31, 16)
+#define HBLANK GENMASK(15, 2)
+#define I_P BIT(1)
+#define R_V_BLANK_IN_OSC BIT(0)
+
+#define DW_DP_VIDEO_CONFIG2 0x0314
+#define VBLANK GENMASK(31, 16)
+#define VACTIVE GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG3 0x0318
+#define H_SYNC_WIDTH GENMASK(31, 16)
+#define H_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG4 0x031c
+#define V_SYNC_WIDTH GENMASK(31, 16)
+#define V_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG5 0x0320
+#define INIT_THRESHOLD_HI GENMASK(22, 21)
+#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16)
+#define INIT_THRESHOLD GENMASK(13, 7)
+#define AVERAGE_BYTES_PER_TU GENMASK(6, 0)
+
+#define DW_DP_VIDEO_MSA1 0x0324
+#define VSTART GENMASK(31, 16)
+#define HSTART GENMASK(15, 0)
+
+#define DW_DP_VIDEO_MSA2 0x0328
+#define MISC0 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_MSA3 0x032c
+#define MISC1 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330
+#define HBLANK_INTERVAL_EN BIT(16)
+#define HBLANK_INTERVAL GENMASK(15, 0)
+
+#define DW_DP_AUD_CONFIG1 0x0400
+#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24)
+#define AUDIO_PACKET_ID GENMASK(23, 16)
+#define AUDIO_MUTE BIT(15)
+#define NUM_CHANNELS GENMASK(14, 12)
+#define HBR_MODE_ENABLE BIT(10)
+#define AUDIO_DATA_WIDTH GENMASK(9, 5)
+#define AUDIO_DATA_IN_EN GENMASK(4, 1)
+#define AUDIO_INF_SELECT BIT(0)
+
+#define DW_DP_SDP_VERTICAL_CTRL 0x0500
+#define EN_VERTICAL_SDP BIT(2)
+#define EN_AUDIO_STREAM_SDP BIT(1)
+#define EN_AUDIO_TIMESTAMP_SDP BIT(0)
+#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504
+#define EN_HORIZONTAL_SDP BIT(2)
+#define DW_DP_SDP_STATUS_REGISTER 0x0508
+#define DW_DP_SDP_MANUAL_CTRL 0x050c
+#define DW_DP_SDP_STATUS_EN 0x0510
+
+#define DW_DP_SDP_REGISTER_BANK 0x0600
+#define SDP_REGS GENMASK(31, 0)
+
+#define DW_DP_PHYIF_CTRL 0x0a00
+#define PHY_WIDTH BIT(25)
+#define PHY_POWERDOWN GENMASK(20, 17)
+#define PHY_BUSY GENMASK(15, 12)
+#define SSC_DIS BIT(16)
+#define XMIT_ENABLE GENMASK(11, 8)
+#define PHY_LANES GENMASK(7, 6)
+#define PHY_RATE GENMASK(5, 4)
+#define TPS_SEL GENMASK(3, 0)
+
+#define DW_DP_PHY_TX_EQ 0x0a04
+#define DW_DP_CUSTOMPAT0 0x0a08
+#define DW_DP_CUSTOMPAT1 0x0a0c
+#define DW_DP_CUSTOMPAT2 0x0a10
+#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14
+#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18
+
+#define DW_DP_AUX_CMD 0x0b00
+#define AUX_CMD_TYPE GENMASK(31, 28)
+#define AUX_ADDR GENMASK(27, 8)
+#define I2C_ADDR_ONLY BIT(4)
+#define AUX_LEN_REQ GENMASK(3, 0)
+
+#define DW_DP_AUX_STATUS 0x0b04
+#define AUX_TIMEOUT BIT(17)
+#define AUX_BYTES_READ GENMASK(23, 19)
+#define AUX_STATUS GENMASK(7, 4)
+
+#define DW_DP_AUX_DATA0 0x0b08
+#define DW_DP_AUX_DATA1 0x0b0c
+#define DW_DP_AUX_DATA2 0x0b10
+#define DW_DP_AUX_DATA3 0x0b14
+
+#define DW_DP_GENERAL_INTERRUPT 0x0d00
+#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6)
+#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5)
+#define SDP_EVENT_STREAM0 BIT(4)
+#define AUX_CMD_INVALID BIT(3)
+#define HDCP_EVENT BIT(2)
+#define AUX_REPLY_EVENT BIT(1)
+#define HPD_EVENT BIT(0)
+
+#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04
+#define HDCP_EVENT_EN BIT(2)
+#define AUX_REPLY_EVENT_EN BIT(1)
+#define HPD_EVENT_EN BIT(0)
+
+#define DW_DP_HPD_STATUS 0x0d08
+#define HPD_STATE GENMASK(11, 9)
+#define HPD_STATUS BIT(8)
+#define HPD_HOT_UNPLUG BIT(2)
+#define HPD_HOT_PLUG BIT(1)
+#define HPD_IRQ BIT(0)
+
+#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c
+#define HPD_UNPLUG_ERR_EN BIT(3)
+#define HPD_UNPLUG_EN BIT(2)
+#define HPD_PLUG_EN BIT(1)
+#define HPD_IRQ_EN BIT(0)
+
+#define DW_DP_HDCP_CFG 0x0e00
+#define DPCD12PLUS BIT(7)
+#define CP_IRQ BIT(6)
+#define BYPENCRYPTION BIT(5)
+#define HDCP_LOCK BIT(4)
+#define ENCRYPTIONDISABLE BIT(3)
+#define ENABLE_HDCP_13 BIT(2)
+#define ENABLE_HDCP BIT(1)
+
+#define DW_DP_HDCP_OBS 0x0e04
+#define HDCP22_RE_AUTHENTICATION_REQ BIT(31)
+#define HDCP22_AUTHENTICATION_FAILED BIT(30)
+#define HDCP22_AUTHENTICATION_SUCCESS BIT(29)
+#define HDCP22_CAPABLE_SINK BIT(28)
+#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27)
+#define HDCP22_STATE GENMASK(26, 24)
+#define HDCP22_BOOTED BIT(23)
+#define HDCP13_BSTATUS GENMASK(22, 19)
+#define REPEATER BIT(18)
+#define HDCP_CAPABLE BIT(17)
+#define STATEE GENMASK(16, 14)
+#define STATEOEG GENMASK(13, 11)
+#define STATER GENMASK(10, 8)
+#define STATEA GENMASK(7, 4)
+#define SUBSTATEA GENMASK(3, 1)
+#define HDCPENGAGED BIT(0)
+
+#define DW_DP_HDCP_APIINTCLR 0x0e08
+#define DW_DP_HDCP_APIINTSTAT 0x0e0c
+#define DW_DP_HDCP_APIINTMSK 0x0e10
+#define HDCP22_GPIOINT BIT(8)
+#define HDCP_ENGAGED BIT(7)
+#define HDCP_FAILED BIT(6)
+#define KSVSHA1CALCDONEINT BIT(5)
+#define AUXRESPNACK7TIMES BIT(4)
+#define AUXRESPTIMEOUT BIT(3)
+#define AUXRESPDEFER7TIMES BIT(2)
+#define KSVACCESSINT BIT(0)
+
+#define DW_DP_HDCP_KSVMEMCTRL 0x0e18
+#define KSVSHA1STATUS BIT(4)
+#define KSVMEMACCESS BIT(1)
+#define KSVMEMREQUEST BIT(0)
+
+#define DW_DP_HDCP_REG_BKSV0 0x3600
+#define DW_DP_HDCP_REG_BKSV1 0x3604
+#define DW_DP_HDCP_REG_ANCONF 0x3608
+#define AN_BYPASS BIT(0)
+
+#define DW_DP_HDCP_REG_AN0 0x360c
+#define DW_DP_HDCP_REG_AN1 0x3610
+#define DW_DP_HDCP_REG_RMLCTL 0x3614
+#define ODPK_DECRYPT_ENABLE BIT(0)
+
+#define DW_DP_HDCP_REG_RMLSTS 0x3618
+#define IDPK_WR_OK_STS BIT(6)
+#define IDPK_DATA_INDEX GENMASK(5, 0)
+#define DW_DP_HDCP_REG_SEED 0x361c
+#define DW_DP_HDCP_REG_DPK0 0x3620
+#define DW_DP_HDCP_REG_DPK1 0x3624
+#define DW_DP_HDCP22_GPIOSTS 0x3628
+#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c
+#define DW_DP_HDCP_REG_DPK_CRC 0x3630
+
+#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC
+
+#define SDP_REG_BANK_SIZE 16
+
+struct dw_dp_link_caps {
+ bool enhanced_framing;
+ bool tps3_supported;
+ bool tps4_supported;
+ bool fast_training;
+ bool channel_coding;
+ bool ssc;
+};
+
+struct dw_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ bool voltage_max_reached[4];
+ bool pre_max_reached[4];
+};
+
+struct dw_dp_link_train {
+ struct dw_dp_link_train_set adjust;
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct dw_dp_link {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int lanes;
+ u8 sink_count;
+ u8 vsc_sdp_supported;
+ struct dw_dp_link_caps caps;
+ struct dw_dp_link_train train;
+ struct drm_dp_desc desc;
+};
+
+struct dw_dp_bridge_state {
+ struct drm_bridge_state base;
+ struct drm_display_mode mode;
+ u8 video_mapping;
+ u8 color_format;
+ u8 bpc;
+ u8 bpp;
+};
+
+struct dw_dp_sdp {
+ struct dp_sdp base;
+ unsigned long flags;
+};
+
+struct dw_dp_hotplug {
+ bool long_hpd;
+};
+
+struct dw_dp {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct phy *phy;
+ struct clk *apb_clk;
+ struct clk *aux_clk;
+ struct clk *i2s_clk;
+ struct clk *spdif_clk;
+ struct clk *hdcp_clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ int irq;
+ struct work_struct hpd_work;
+ struct dw_dp_hotplug hotplug;
+ /* Serialize hpd status access */
+ struct mutex irq_lock;
+
+ struct drm_dp_aux aux;
+
+ struct dw_dp_link link;
+ struct dw_dp_plat_data plat_data;
+ u8 pixel_mode;
+
+ DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE);
+};
+
+enum {
+ DW_DP_RGB_6BIT,
+ DW_DP_RGB_8BIT,
+ DW_DP_RGB_10BIT,
+ DW_DP_RGB_12BIT,
+ DW_DP_RGB_16BIT,
+ DW_DP_YCBCR444_8BIT,
+ DW_DP_YCBCR444_10BIT,
+ DW_DP_YCBCR444_12BIT,
+ DW_DP_YCBCR444_16BIT,
+ DW_DP_YCBCR422_8BIT,
+ DW_DP_YCBCR422_10BIT,
+ DW_DP_YCBCR422_12BIT,
+ DW_DP_YCBCR422_16BIT,
+ DW_DP_YCBCR420_8BIT,
+ DW_DP_YCBCR420_10BIT,
+ DW_DP_YCBCR420_12BIT,
+ DW_DP_YCBCR420_16BIT,
+};
+
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
+enum {
+ DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
+ DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
+};
+
+enum {
+ DW_DP_HPD_STATE_IDLE,
+ DW_DP_HPD_STATE_UNPLUG,
+ DP_DP_HPD_STATE_TIMEOUT = 4,
+ DW_DP_HPD_STATE_PLUG = 7
+};
+
+enum {
+ DW_DP_PHY_PATTERN_NONE,
+ DW_DP_PHY_PATTERN_TPS_1,
+ DW_DP_PHY_PATTERN_TPS_2,
+ DW_DP_PHY_PATTERN_TPS_3,
+ DW_DP_PHY_PATTERN_TPS_4,
+ DW_DP_PHY_PATTERN_SERM,
+ DW_DP_PHY_PATTERN_PBRS7,
+ DW_DP_PHY_PATTERN_CUSTOM_80BIT,
+ DW_DP_PHY_PATTERN_CP2520_1,
+ DW_DP_PHY_PATTERN_CP2520_2,
+};
+
+struct dw_dp_output_format {
+ u32 bus_format;
+ u32 color_format;
+ u8 video_mapping;
+ u8 bpc;
+ u8 bpp;
+};
+
+#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base)
+
+static const struct dw_dp_output_format dw_dp_output_formats[] = {
+ { MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 },
+ { MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24},
+ { MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 },
+};
+
+static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++)
+ if (dw_dp_output_formats[i].bus_format == bus_format)
+ return &dw_dp_output_formats[i];
+
+ return NULL;
+}
+
+static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b)
+{
+ return container_of(b, struct dw_dp, bridge);
+}
+
+static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *dw_bridge_state;
+ struct drm_bridge_state *state;
+
+ state = drm_priv_to_bridge_state(dp->bridge.base.state);
+ if (!state)
+ return NULL;
+
+ dw_bridge_state = to_dw_dp_bridge_state(state);
+ if (!dw_bridge_state)
+ return NULL;
+
+ return dw_bridge_state;
+}
+
+static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL,
+ FIELD_PREP(TPS_SEL, pattern));
+}
+
+static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes)
+{
+ u32 xmit_enable;
+
+ switch (lanes) {
+ case 4:
+ case 2:
+ case 1:
+ xmit_enable = GENMASK(lanes - 1, 0);
+ break;
+ case 0:
+ default:
+ xmit_enable = 0;
+ break;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE,
+ FIELD_PREP(XMIT_ENABLE, xmit_enable));
+}
+
+static bool dw_dp_bandwidth_ok(struct dw_dp *dp,
+ const struct drm_display_mode *mode, u32 bpp,
+ unsigned int lanes, unsigned int rate)
+{
+ u32 max_bw, req_bw;
+
+ req_bw = mode->clock * bpp / 8;
+ max_bw = lanes * rate;
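+
+ /*
+ * Rough illustration (assumed figures): a 148500 kHz pixel clock at
+ * 24 bpp needs 148500 * 24 / 8 = 445500 kB/s, while four lanes at HBR
+ * (rate == 270000, i.e. the per-lane symbol rate in kHz with one
+ * payload byte per symbol) provide 1080000 kB/s, so such a mode fits.
+ */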
+ if (req_bw > max_bw)
+ return false;
+
+ return true;
+}
+
+static bool dw_dp_hpd_detect(struct dw_dp *dp)
+{
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG;
+}
+
+static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->tps4_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+}
+
+static void dw_dp_link_reset(struct dw_dp_link *link)
+{
+ link->vsc_sdp_supported = 0;
+ link->sink_count = 0;
+ link->revision = 0;
+ link->rate = 0;
+ link->lanes = 0;
+
+ dw_dp_link_caps_reset(&link->caps);
+ memset(link->dpcd, 0, sizeof(link->dpcd));
+}
+
+static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ dw_dp_link_reset(link);
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd));
+
+ if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) {
+ ret = drm_dp_read_sink_count(&dp->aux);
+ if (ret < 0)
+ return ret;
+
+ link->sink_count = ret;
+
+ /* Dongle connected, but no display */
+ if (!link->sink_count)
+ return -ENODEV;
+ }
+
+ link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd);
+
+ link->revision = link->dpcd[DP_DPCD_REV];
+ link->rate = min_t(u32, min(dp->plat_data.max_link_rate,
+ dp->phy->attrs.max_link_rate * 100),
+ drm_dp_max_link_rate(link->dpcd));
+ link->lanes = min_t(u8, phy_get_bus_width(dp->phy),
+ drm_dp_max_lane_count(link->dpcd));
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(link->dpcd);
+ link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd);
+ link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+
+ return 0;
+}
+
+static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_link_train_set *train_set = &link->train.adjust;
+ unsigned int lanes = dp->link.lanes;
+ union phy_configure_opts phy_cfg;
+ unsigned int *vs, *pe;
+ int i, ret;
+ u8 buf[4];
+
+ vs = train_set->voltage_swing;
+ pe = train_set->pre_emphasis;
+
+ for (i = 0; i < lanes; i++) {
+ phy_cfg.dp.voltage[i] = vs[i];
+ phy_cfg.dp.pre[i] = pe[i];
+ }
+
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
+ (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ if (train_set->voltage_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+ if (train_set->pre_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate,
+ unsigned int lanes, bool ssc)
+{
+ union phy_configure_opts phy_cfg;
+ int ret;
+
+ /* Move PHY to P3 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x3));
+
+ phy_cfg.dp.lanes = lanes;
+ phy_cfg.dp.link_rate = rate / 100;
+ phy_cfg.dp.ssc = ssc;
+ phy_cfg.dp.set_lanes = true;
+ phy_cfg.dp.set_rate = true;
+ phy_cfg.dp.set_voltages = false;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES,
+ FIELD_PREP(PHY_LANES, lanes / 2));
+
+ /* Move PHY to P0 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x0));
+
+ dw_dp_phy_xmit_enable(dp, lanes);
+
+ return 0;
+}
+
+static int dw_dp_link_configure(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 buf[2];
+ int ret;
+
+ ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc);
+ if (ret)
+ return ret;
+
+ buf[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ buf[1] = link->lanes;
+
+ if (link->caps.enhanced_framing) {
+ buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 0));
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0;
+ buf[1] = link->caps.channel_coding ? DP_SET_ANSI_8B10B : 0;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dw_dp_link_train_init(struct dw_dp_link_train *train)
+{
+ struct dw_dp_link_train_set *adj = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ adj->voltage_swing[i] = 0;
+ adj->pre_emphasis[i] = 0;
+ adj->voltage_max_reached[i] = false;
+ adj->pre_max_reached[i] = false;
+ }
+
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ u8 buf = 0;
+ int ret;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 0));
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE);
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1);
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2);
+ break;
+ case DP_TRAINING_PATTERN_3:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3);
+ break;
+ case DP_TRAINING_PATTERN_4:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ buf | pattern);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
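+/*
+ * Highest voltage swing allowed for a given pre-emphasis request. The
+ * mapping follows the common DP training convention that the combined
+ * swing and pre-emphasis levels stay at or below level 3 (a reading of
+ * the table below, not a claim taken from the controller documentation).
+ */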
+static u8 dw_dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
+static bool dw_dp_link_get_adjustments(struct dw_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct dw_dp_link_train_set *adj = &link->train.adjust;
+ unsigned int i;
+ bool changed = false;
+ u8 v = 0;
+ u8 p = 0;
+
+ for (i = 0; i < link->lanes; i++) {
+ v = drm_dp_get_adjust_request_voltage(status, i);
+ v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p = drm_dp_get_adjust_request_pre_emphasis(status, i);
+ p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i])
+ changed = true;
+
+ if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) {
+ adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ adj->pre_max_reached[i] = true;
+ } else {
+ adj->pre_emphasis[i] = p;
+ adj->pre_max_reached[i] = false;
+ }
+
+ v = min(v, dw_dp_voltage_max(p));
+ if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) {
+ adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ adj->voltage_max_reached[i] = true;
+ } else {
+ adj->voltage_swing[i] = v;
+ adj->voltage_max_reached[i] = false;
+ }
+ }
+
+ return changed;
+}
+
+static int dw_dp_link_clock_recovery(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE];
+ unsigned int tries = 0;
+ int ret;
+ bool adj_changed;
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (;;) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(status, link->lanes)) {
+ link->train.clock_recovered = true;
+ break;
+ }
+
+ /*
+ * According to the DP 1.4 spec, if the current adjustment request is
+ * the same as the previous one, we retry at most 5 times.
+ */
+ adj_changed = dw_dp_link_get_adjustments(link, status);
+ if (!adj_changed)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == 5)
+ break;
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_channel_equalization(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE], pattern;
+ unsigned int tries;
+ int ret;
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ return ret;
+
+ for (tries = 1; tries < 5; tries++) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0)
+ return ret;
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(status, link->lanes)) {
+ link->train.channel_equalized = true;
+ break;
+ }
+
+ dw_dp_link_get_adjustments(link, status);
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_downgrade(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+ case 270000:
+ link->rate = 162000;
+ break;
+ case 540000:
+ link->rate = 270000;
+ break;
+ case 810000:
+ link->rate = 540000;
+ break;
+ }
+
+ if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes,
+ link->rate))
+ return -E2BIG;
+
+ return 0;
+}
+
+static int dw_dp_link_train_full(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+retry:
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_clock_recovery(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "clock recovery failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ dev_err(dp->dev, "clock recovery failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "clock recovery succeeded\n");
+
+ ret = dw_dp_link_channel_equalization(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "channel equalization failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ dev_err(dp->dev, "channel equalization failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "channel equalization succeeded\n");
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train_fast(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE];
+ u8 pattern;
+
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ dev_err(dp->dev, "channel equalization failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ if (link->caps.fast_training) {
+ if (dw_dp_link_train_valid(&link->train)) {
+ ret = dw_dp_link_train_fast(dp);
+ if (ret < 0)
+ dev_err(dp->dev, "fast link training failed: %d\n", ret);
+ else
+ return 0;
+ }
+ }
+
+ ret = dw_dp_link_train_full(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "full link training failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp)
+{
+ const u8 *payload = sdp->base.db;
+ u32 reg;
+ int i, nr;
+
+ nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ if (nr < SDP_REG_BANK_SIZE)
+ set_bit(nr, dp->sdp_reg_bank);
+ else
+ return -EBUSY;
+
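+	/* Each SDP slot in the register bank is 9 words: 1 header + 8 payload */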
+ reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4;
+
+ /* SDP header */
+ regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header));
+
+ /* SDP data payload */
+ for (i = 1; i < 9; i++, payload += 4)
+ regmap_write(dp->regmap, reg + i * 4,
+ FIELD_PREP(SDP_REGS, get_unaligned_le32(payload)));
+
+ if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_VERTICAL_CTRL,
+ EN_VERTICAL_SDP << nr,
+ EN_VERTICAL_SDP << nr);
+
+ if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL,
+ EN_HORIZONTAL_SDP << nr,
+ EN_HORIZONTAL_SDP << nr);
+
+ return 0;
+}
+
+static int dw_dp_send_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *state;
+ struct dw_dp_sdp sdp = {};
+ struct drm_dp_vsc_sdp vsc = {};
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ vsc.bpc = state->bpc;
+
+ vsc.sdp_type = DP_SDP_VSC;
+ vsc.revision = 0x5;
+ vsc.length = 0x13;
+ vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+
+ sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL;
+
+ switch (state->color_format) {
+ case DRM_COLOR_FORMAT_YCBCR444:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV422;
+ break;
+ case DRM_COLOR_FORMAT_RGB444:
+ default:
+ vsc.pixelformat = DP_PIXELFORMAT_RGB;
+ break;
+ }
+
+ if (state->color_format == DRM_COLOR_FORMAT_RGB444) {
+ vsc.colorimetry = DP_COLORIMETRY_DEFAULT;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA;
+ } else {
+ vsc.colorimetry = DP_COLORIMETRY_BT709_YCC;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ }
+
+ drm_dp_vsc_sdp_pack(&vsc, &sdp.base);
+
+ return dw_dp_send_sdp(dp, &sdp);
+}
+
+static int dw_dp_video_set_pixel_mode(struct dw_dp *dp)
+{
+ switch (dp->pixel_mode) {
+ case DW_DP_MP_SINGLE_PIXEL:
+ case DW_DP_MP_DUAL_PIXEL:
+ case DW_DP_MP_QUAD_PIXEL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT,
+ FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode));
+
+ return 0;
+}
+
+static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+		return false;
+
+ if (!link->vsc_sdp_supported)
+ return false;
+
+ if (state->color_format == DRM_COLOR_FORMAT_YCBCR420)
+ return true;
+
+ return false;
+}
+
+static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc,
+ u16 vstart, u16 hstart)
+{
+ u16 misc = 0;
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ misc |= DP_MSA_MISC_COLOR_VSC_SDP;
+
+ switch (color_format) {
+ case DRM_COLOR_FORMAT_RGB444:
+ misc |= DP_MSA_MISC_COLOR_RGB;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR444:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ misc |= DP_MSA_MISC_6_BPC;
+ break;
+ case 8:
+ misc |= DP_MSA_MISC_8_BPC;
+ break;
+ case 10:
+ misc |= DP_MSA_MISC_10_BPC;
+ break;
+ case 12:
+ misc |= DP_MSA_MISC_12_BPC;
+ break;
+ case 16:
+ misc |= DP_MSA_MISC_16_BPC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA1,
+ FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8));
+
+ return 0;
+}
+
+static void dw_dp_video_disable(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 0));
+}
+
+static int dw_dp_video_enable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+ struct drm_display_mode *mode;
+ u8 color_format, bpc, bpp;
+ u8 init_threshold, vic;
+ u32 hstart, hactive, hblank, h_sync_width, h_front_porch;
+ u32 vstart, vactive, vblank, v_sync_width, v_front_porch;
+ u32 peak_stream_bandwidth, link_bandwidth;
+ u32 average_bytes_per_tu, average_bytes_per_tu_frac;
+ u32 ts, hblank_interval;
+ u32 value;
+ int ret;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ bpc = state->bpc;
+ bpp = state->bpp;
+ color_format = state->color_format;
+ mode = &state->mode;
+
+ vstart = mode->vtotal - mode->vsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+
+ ret = dw_dp_video_set_pixel_mode(dp);
+ if (ret)
+ return ret;
+
+ ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING,
+ FIELD_PREP(VIDEO_MAPPING, state->video_mapping));
+
+ /* Configure DW_DP_VINPUT_POLARITY_CTRL register */
+ value = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= FIELD_PREP(HSYNC_IN_POLARITY, 1);
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= FIELD_PREP(VSYNC_IN_POLARITY, 1);
+ regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG1 register */
+ hactive = mode->hdisplay;
+ hblank = mode->htotal - mode->hdisplay;
+ value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ value |= FIELD_PREP(I_P, 1);
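+	/* The CEA VICs below are the interlaced modes; set R_V_BLANK_IN_OSC for them */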
+ vic = drm_match_cea_mode(mode);
+ if (vic == 5 || vic == 6 || vic == 7 ||
+ vic == 10 || vic == 11 || vic == 20 ||
+ vic == 21 || vic == 22 || vic == 39 ||
+ vic == 25 || vic == 26 || vic == 40 ||
+ vic == 44 || vic == 45 || vic == 46 ||
+ vic == 50 || vic == 51 || vic == 54 ||
+ vic == 55 || vic == 58 || vic == 59)
+ value |= R_V_BLANK_IN_OSC;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG2 register */
+ vblank = mode->vtotal - mode->vdisplay;
+ vactive = mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2,
+ FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive));
+
+ /* Configure DW_DP_VIDEO_CONFIG3 register */
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3,
+ FIELD_PREP(H_SYNC_WIDTH, h_sync_width) |
+ FIELD_PREP(H_FRONT_PORCH, h_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG4 register */
+ v_sync_width = mode->vsync_end - mode->vsync_start;
+ v_front_porch = mode->vsync_start - mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4,
+ FIELD_PREP(V_SYNC_WIDTH, v_sync_width) |
+ FIELD_PREP(V_FRONT_PORCH, v_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG5 register */
+ peak_stream_bandwidth = mode->clock * bpp / 8;
+ link_bandwidth = (link->rate / 1000) * link->lanes;
+ ts = peak_stream_bandwidth * 64 / link_bandwidth;
+ average_bytes_per_tu = ts / 1000;
+ average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10;
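+	/*
+	 * Pick the initial transfer-unit FIFO threshold: for single-pixel
+	 * mode a fixed value chosen from the average bytes per TU and the
+	 * horizontal blanking, otherwise derived from the lane count and
+	 * bpc/format factor (t1), the link-symbol-to-pixel clock ratio (t2)
+	 * and the rounded-up average bytes per TU (t3).
+	 */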
+ if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) {
+ if (average_bytes_per_tu < 6)
+ init_threshold = 32;
+ else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 12;
+ else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 3;
+ else
+ init_threshold = 16;
+ } else {
+ u32 t1 = 0, t2 = 0, t3 = 0;
+
+ switch (bpc) {
+ case 6:
+ t1 = (4 * 1000 / 9) * link->lanes;
+ break;
+ case 8:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ t1 = (1000 / 2) * link->lanes;
+ } else {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 3) * link->lanes;
+ else
+ t1 = (3000 / 16) * link->lanes;
+ }
+ break;
+ case 10:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422)
+ t1 = (2000 / 5) * link->lanes;
+ else
+ t1 = (4000 / 15) * link->lanes;
+ break;
+ case 12:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 3) * link->lanes;
+ } else {
+ t1 = (2000 / 9) * link->lanes;
+ }
+ break;
+ case 16:
+ if (color_format != DRM_COLOR_FORMAT_YCBCR422 &&
+ dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 4) * link->lanes;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (color_format == DRM_COLOR_FORMAT_YCBCR420)
+ t2 = (link->rate / 4) * 1000 / (mode->clock / 2);
+ else
+ t2 = (link->rate / 4) * 1000 / mode->clock;
+
+ if (average_bytes_per_tu_frac)
+ t3 = average_bytes_per_tu + 1;
+ else
+ t3 = average_bytes_per_tu;
+ init_threshold = t1 * t2 * t3 / (1000 * 1000);
+ if (init_threshold <= 16 || average_bytes_per_tu < 10)
+ init_threshold = 40;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5,
+ FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) |
+ FIELD_PREP(INIT_THRESHOLD, init_threshold) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu));
+
+ /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */
+ hblank_interval = hblank * (link->rate / 4) / mode->clock;
+ regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL,
+ FIELD_PREP(HBLANK_INTERVAL_EN, 1) |
+ FIELD_PREP(HBLANK_INTERVAL, hblank_interval));
+
+ /* Video stream enable */
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 1));
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ dw_dp_send_vsc_sdp(dp);
+
+ return 0;
+}
+
+static void dw_dp_hpd_init(struct dw_dp *dp)
+{
+ /* Enable all HPD interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE,
+ HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN,
+ FIELD_PREP(HPD_UNPLUG_EN, 1) |
+ FIELD_PREP(HPD_PLUG_EN, 1) |
+ FIELD_PREP(HPD_IRQ_EN, 1));
+
+ /* Enable all top-level interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1));
+}
+
+static void dw_dp_aux_init(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1));
+}
+
+static void dw_dp_init_hw(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN,
+ FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0));
+
+ dw_dp_hpd_init(dp);
+ dw_dp_aux_init(dp);
+}
+
+static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size)
+{
+ size_t i, j;
+
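+	/* Pack the buffer little-endian into 32-bit words, 4 bytes per AUX data register */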
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i * 4 + j] << (j * 8);
+
+ regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value);
+ }
+
+ return size;
+}
+
+static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value);
+
+ for (j = 0; j < num; j++)
+ buffer[i * 4 + j] = value >> (j * 8);
+ }
+
+ return size;
+}
+
+static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct dw_dp *dp = container_of(aux, struct dw_dp, aux);
+ unsigned long timeout = msecs_to_jiffies(10);
+ u32 status, value;
+ ssize_t ret = 0;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size);
+ if (ret < 0)
+ return ret;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
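+	/*
+	 * Build the AUX command word: transfer length (or address-only for
+	 * zero-length I2C transactions), request type and address, then
+	 * start the transfer by writing DW_DP_AUX_CMD.
+	 */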
+ if (msg->size > 0)
+ value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1);
+ else
+ value = FIELD_PREP(I2C_ADDR_ONLY, 1);
+ value |= FIELD_PREP(AUX_CMD_TYPE, msg->request);
+ value |= FIELD_PREP(AUX_ADDR, msg->address);
+ regmap_write(dp->regmap, DW_DP_AUX_CMD, value);
+
+ status = wait_for_completion_timeout(&dp->complete, timeout);
+ if (!status) {
+ dev_err(dp->dev, "timeout waiting for AUX reply\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value);
+ if (value & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ msg->reply = FIELD_GET(AUX_STATUS, value);
+
+ if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1;
+
+ if (count != msg->size)
+ return -EBUSY;
+
+ ret = dw_dp_aux_read_data(dp, msg->buffer, count);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Video timing limits for DP:
+ * 1. the horizontal front porch must be 2-pixel aligned;
+ * 2. the minimum hsync width is 9 pixels;
+ * 3. the minimum horizontal back porch is 16 pixels.
+ */
+static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_bridge_state *state;
+ const struct dw_dp_output_format *fmt;
+ struct drm_display_mode *mode;
+ int min_hbp = 16;
+ int min_hsync = 9;
+
+ state = to_dw_dp_bridge_state(bridge_state);
+ mode = &state->mode;
+
+ fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format);
+ if (!fmt)
+ return -EINVAL;
+
+ state->video_mapping = fmt->video_mapping;
+ state->color_format = fmt->color_format;
+ state->bpc = fmt->bpc;
+ state->bpp = fmt->bpp;
+
+ if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) {
+ adjusted_mode->hsync_start += 1;
+		dev_warn(dp->dev, "hfp is not 2-pixel aligned, fixing up to aligned hfp\n");
+ }
+
+ if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) {
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync;
+		dev_warn(dp->dev, "hsync is too narrow, fixing up to min hsync: %d\n", min_hsync);
+ }
+
+ if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) {
+ adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp;
+		dev_warn(dp->dev, "hbp is too narrow, fixing up to min hbp: %d\n", min_hbp);
+ }
+
+ drm_mode_copy(mode, adjusted_mode);
+
+ return 0;
+}
+
+static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ u32 min_bpp;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 &&
+ link->vsc_sdp_supported &&
+ (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode)))
+ min_bpp = 12;
+ else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ min_bpp = 16;
+ else if (info->color_formats & DRM_COLOR_FORMAT_RGB444)
+ min_bpp = 18;
+ else
+ min_bpp = 24;
+
+ if (!link->vsc_sdp_supported &&
+ drm_mode_is_420_only(info, mode))
+ return MODE_NO_420;
+
+ if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool dw_dp_needs_link_retrain(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!dw_dp_link_train_valid(&link->train))
+ return false;
+
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0)
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, dp->link.lanes);
+}
+
+static void dw_dp_link_disable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+
+ if (dw_dp_hpd_detect(dp))
+ drm_dp_link_power_down(&dp->aux, dp->link.revision);
+
+ dw_dp_phy_xmit_enable(dp, 0);
+
+ phy_power_off(dp->phy);
+
+ link->train.clock_recovered = false;
+ link->train.channel_equalized = false;
+}
+
+static int dw_dp_link_enable(struct dw_dp *dp)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_link_power_up(&dp->aux, dp->link.revision);
+ if (ret < 0)
+ return ret;
+
+ ret = dw_dp_link_train(dp);
+
+ return ret;
+}
+
+static void dw_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector) {
+ dev_err(dp->dev, "failed to get connector\n");
+ return;
+ }
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state) {
+ dev_err(dp->dev, "failed to get connector state\n");
+ return;
+ }
+
+ set_bit(0, dp->sdp_reg_bank);
+
+ ret = dw_dp_link_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable link: %d\n", ret);
+ return;
+ }
+
+ ret = dw_dp_video_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable video: %d\n", ret);
+ return;
+ }
+}
+
+static void dw_dp_reset(struct dw_dp *dp)
+{
+ int val;
+
+ disable_irq(dp->irq);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 1));
+ usleep_range(10, 20);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 0));
+
+ dw_dp_init_hw(dp);
+ regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val,
+ FIELD_GET(HPD_HOT_PLUG, val), 200, 200000);
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ enable_irq(dp->irq);
+}
+
+static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ dw_dp_video_disable(dp);
+ dw_dp_link_disable(dp);
+ bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ dw_dp_reset(dp);
+}
+
+static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret < 0)
+ return false;
+ ret = dw_dp_link_parse(dp, connector);
+ phy_power_off(dp->phy);
+
+ return !ret;
+}
+
+static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ if (!dw_dp_hpd_detect(dp))
+ return connector_status_disconnected;
+
+ if (!dw_dp_hpd_detect_link(dp, connector))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ const struct drm_edid *edid;
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return NULL;
+
+ edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
+
+ phy_power_off(dp->phy);
+
+ return edid;
+}
+
+static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_display_mode mode = crtc_state->mode;
+ const struct dw_dp_output_format *fmt;
+ u32 i, j = 0;
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) {
+ fmt = &dw_dp_output_formats[i];
+
+ if (fmt->bpc > conn_state->max_bpc)
+ continue;
+
+ if (!(fmt->color_format & di->color_formats))
+ continue;
+
+ if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 &&
+ !link->vsc_sdp_supported)
+ continue;
+
+ if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 &&
+ drm_mode_is_420_only(di, &mode))
+ continue;
+
+ if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate))
+ continue;
+
+ output_fmts[j++] = fmt->bus_format;
+ }
+
+ *num_output_fmts = j;
+
+ return output_fmts;
+}
+
+static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct dw_dp_bridge_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+ return &state->base;
+}
+
+static const struct drm_bridge_funcs dw_dp_bridge_funcs = {
+ .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_check = dw_dp_bridge_atomic_check,
+ .mode_valid = dw_dp_bridge_mode_valid,
+ .atomic_enable = dw_dp_bridge_atomic_enable,
+ .atomic_disable = dw_dp_bridge_atomic_disable,
+ .detect = dw_dp_bridge_detect,
+ .edid_read = dw_dp_bridge_edid_read,
+};
+
+static int dw_dp_link_retrain(struct dw_dp *dp)
+{
+ struct drm_device *dev = dp->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!dw_dp_needs_link_retrain(dp))
+ return 0;
+
+ dev_dbg(dp->dev, "Retraining link\n");
+
+ drm_modeset_acquire_init(&ctx, 0);
+ for (;;) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = dw_dp_link_train(dp);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static void dw_dp_hpd_work(struct work_struct *work)
+{
+ struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work);
+ bool long_hpd;
+ int ret;
+
+ mutex_lock(&dp->irq_lock);
+ long_hpd = dp->hotplug.long_hpd;
+ mutex_unlock(&dp->irq_lock);
+
+	dev_dbg(dp->dev, "[drm] Got HPD IRQ - %s\n", long_hpd ? "long" : "short");
+
+ if (!long_hpd) {
+ if (dw_dp_needs_link_retrain(dp)) {
+ ret = dw_dp_link_retrain(dp);
+ if (ret)
+				dev_warn(dp->dev, "Link retraining failed\n");
+ }
+ } else {
+ drm_helper_hpd_irq_event(dp->bridge.dev);
+ }
+}
+
+static void dw_dp_handle_hpd_event(struct dw_dp *dp)
+{
+ u32 value;
+
+ mutex_lock(&dp->irq_lock);
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ if (value & HPD_IRQ) {
+ dev_dbg(dp->dev, "IRQ from the HPD\n");
+ dp->hotplug.long_hpd = false;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ);
+ }
+
+ if (value & HPD_HOT_PLUG) {
+ dev_dbg(dp->dev, "Hot plug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ }
+
+ if (value & HPD_HOT_UNPLUG) {
+ dev_dbg(dp->dev, "Unplug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG);
+ }
+ mutex_unlock(&dp->irq_lock);
+
+ schedule_work(&dp->hpd_work);
+}
+
+static irqreturn_t dw_dp_irq(int irq, void *data)
+{
+ struct dw_dp *dp = data;
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value);
+ if (!value)
+ return IRQ_NONE;
+
+ if (value & HPD_EVENT)
+ dw_dp_handle_hpd_event(dp);
+
+ if (value & AUX_REPLY_EVENT) {
+ regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT);
+ complete(&dp->complete);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_range dw_dp_readable_ranges[] = {
+ regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID),
+ regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3),
+ regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL),
+ regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL),
+ regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1),
+ regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN),
+ regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL),
+ regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3),
+ regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE),
+};
+
+static const struct regmap_access_table dw_dp_readable_table = {
+ .yes_ranges = dw_dp_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges),
+};
+
+static const struct regmap_config dw_dp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = DW_DP_MAX_REGISTER,
+ .rd_table = &dw_dp_readable_table,
+};
+
+static void dw_dp_phy_exit(void *data)
+{
+ struct dw_dp *dp = data;
+
+ phy_exit(dp->phy);
+}
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp *dp;
+ struct drm_bridge *bridge;
+ void __iomem *res;
+ int ret;
+
+ dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
+
+ dp->dev = dev;
+ dp->pixel_mode = DW_DP_MP_QUAD_PIXEL;
+
+ dp->plat_data.max_link_rate = plat_data->max_link_rate;
+ bridge = &dp->bridge;
+ mutex_init(&dp->irq_lock);
+ INIT_WORK(&dp->hpd_work, dw_dp_hpd_work);
+ init_completion(&dp->complete);
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res))
+ return ERR_CAST(res);
+
+ dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config);
+ if (IS_ERR(dp->regmap)) {
+ dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n");
+ return ERR_CAST(dp->regmap);
+ }
+
+ dp->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(dp->phy)) {
+ dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n");
+ return ERR_CAST(dp->phy);
+ }
+
+ dp->apb_clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(dp->apb_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n");
+ return ERR_CAST(dp->apb_clk);
+ }
+
+ dp->aux_clk = devm_clk_get_enabled(dev, "aux");
+ if (IS_ERR(dp->aux_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n");
+ return ERR_CAST(dp->aux_clk);
+ }
+
+ dp->i2s_clk = devm_clk_get(dev, "i2s");
+ if (IS_ERR(dp->i2s_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n");
+ return ERR_CAST(dp->i2s_clk);
+ }
+
+ dp->spdif_clk = devm_clk_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n");
+ return ERR_CAST(dp->spdif_clk);
+ }
+
+ dp->hdcp_clk = devm_clk_get(dev, "hdcp");
+ if (IS_ERR(dp->hdcp_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n");
+ return ERR_CAST(dp->hdcp_clk);
+ }
+
+ dp->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(dp->rstc)) {
+ dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n");
+ return ERR_CAST(dp->rstc);
+ }
+
+ bridge->of_node = dev->of_node;
+ bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->ycbcr_420_allowed = true;
+
+ devm_drm_bridge_add(dev, bridge);
+
+ dp->aux.dev = dev;
+ dp->aux.drm_dev = encoder->dev;
+ dp->aux.name = dev_name(dev);
+ dp->aux.transfer = dw_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret) {
+ dev_err_probe(dev, ret, "Aux register failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to attach bridge\n");
+
+ dw_dp_init_hw(dp);
+
+ ret = phy_init(dp->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "phy init failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0)
+		return ERR_PTR(dp->irq);
+
+ ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
+ IRQF_ONESHOT, dev_name(dev), dp);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request irq\n");
+ return ERR_PTR(ret);
+ }
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(dw_dp_bind);
+
+MODULE_AUTHOR("Andy Yan <andyshrk@163.com>");
+MODULE_DESCRIPTION("DW DP Core Library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index ab18f9a3bf23..df7a37eb47f4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data,
params->iec.status[0] & IEC958_AES0_NONAUDIO);
dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);
+ if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 1);
+ else
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index b281cabfe992..fe4c026280f0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -8,6 +8,7 @@
*/
#include <linux/completion.h>
#include <linux/hdmi.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <drm/bridge/dw_hdmi_qp.h>
#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -25,6 +27,8 @@
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
+#include <media/cec.h>
+
#include <sound/hdmi-codec.h>
#include "dw-hdmi-qp.h"
@@ -36,6 +40,88 @@
#define SCRAMB_POLL_DELAY_MS 3000
+/*
+ * Unless otherwise noted, entries in this table are 100% optimization.
+ * Values can be obtained from dw_hdmi_qp_compute_n() but that function is
+ * slow so we pre-compute values we expect to see.
+ *
+ * The values for TMDS 25175, 25200, 27000, 54000, 74250 and 148500 kHz are
+ * the recommended N values specified in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_n {
+ unsigned long tmds;
+ unsigned int n_32k;
+ unsigned int n_44k1;
+ unsigned int n_48k;
+} common_tmds_n_table[] = {
+ { .tmds = 25175000, .n_32k = 4576, .n_44k1 = 7007, .n_48k = 6864, },
+ { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, },
+ { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, },
+ { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, },
+ { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, },
+ { .tmds = 73250000, .n_32k = 11648, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, },
+ { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, },
+ { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 146250000, .n_32k = 11648, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+
+	/* For 297 MHz+ the HDMI spec has other rules for setting N */
+ { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, },
+ { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240,},
+
+ /* End of table */
+ { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, },
+};
+
+/*
+ * These are the CTS values as recommended in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_cts {
+ unsigned long tmds;
+ unsigned int cts_32k;
+ unsigned int cts_44k1;
+ unsigned int cts_48k;
+} common_tmds_cts_table[] = {
+ { .tmds = 25175000, .cts_32k = 28125, .cts_44k1 = 31250, .cts_48k = 28125, },
+ { .tmds = 25200000, .cts_32k = 25200, .cts_44k1 = 28000, .cts_48k = 25200, },
+ { .tmds = 27000000, .cts_32k = 27000, .cts_44k1 = 30000, .cts_48k = 27000, },
+ { .tmds = 54000000, .cts_32k = 54000, .cts_44k1 = 60000, .cts_48k = 54000, },
+ { .tmds = 74250000, .cts_32k = 74250, .cts_44k1 = 82500, .cts_48k = 74250, },
+ { .tmds = 148500000, .cts_32k = 148500, .cts_44k1 = 165000, .cts_48k = 148500, },
+
+ /* End of table */
+ { .tmds = 0, .cts_32k = 0, .cts_44k1 = 0, .cts_48k = 0, },
+};
+
struct dw_hdmi_qp_i2c {
struct i2c_adapter adap;
@@ -48,18 +134,37 @@ struct dw_hdmi_qp_i2c {
bool is_segment;
};
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+struct dw_hdmi_qp_cec {
+ struct drm_connector *connector;
+ int irq;
+ u32 addresses;
+ struct cec_msg rx_msg;
+ u8 tx_status;
+ bool tx_done;
+ bool rx_done;
+};
+#endif
+
struct dw_hdmi_qp {
struct drm_bridge bridge;
struct device *dev;
struct dw_hdmi_qp_i2c *i2c;
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ struct dw_hdmi_qp_cec *cec;
+#endif
+
struct {
const struct dw_hdmi_qp_phy_ops *ops;
void *data;
} phy;
+ unsigned long ref_clk_rate;
struct regmap *regm;
+
+ unsigned long tmds_char_rate;
};
static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
@@ -83,6 +188,346 @@ static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data,
regmap_update_bits(hdmi->regm, reg, mask, data);
}
+static struct dw_hdmi_qp *dw_hdmi_qp_from_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_hdmi_qp, bridge);
+}
+
+static void dw_hdmi_qp_set_cts_n(struct dw_hdmi_qp *hdmi, unsigned int cts,
+ unsigned int n)
+{
+ /* Set N */
+ dw_hdmi_qp_mod(hdmi, n, AUDPKT_ACR_N_VALUE, AUDPKT_ACR_CONTROL0);
+
+ /* Set CTS */
+ if (cts)
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_EN, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+ else
+ dw_hdmi_qp_mod(hdmi, 0, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_VAL(cts), AUDPKT_ACR_CTS_OVR_VAL_MSK,
+ AUDPKT_ACR_CONTROL1);
+}
+
+static int dw_hdmi_qp_match_tmds_n_table(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ const struct dw_hdmi_audio_tmds_n *tmds_n = NULL;
+ int i;
+
+ for (i = 0; common_tmds_n_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_n_table[i].tmds) {
+ tmds_n = &common_tmds_n_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_n)
+ return -ENOENT;
+
+ switch (freq) {
+ case 32000:
+ return tmds_n->n_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return (freq / 44100) * tmds_n->n_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return (freq / 48000) * tmds_n->n_48k;
+ default:
+ return -ENOENT;
+ }
+}
+
+static u32 dw_hdmi_qp_audio_math_diff(unsigned int freq, unsigned int n,
+ unsigned int pixel_clk)
+{
+ u64 cts = mul_u32_u32(pixel_clk, n);
+
+ return do_div(cts, 128 * freq);
+}
+
+static unsigned int dw_hdmi_qp_compute_n(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500);
+ unsigned int max_n = (128 * freq) / 300;
+ unsigned int ideal_n = (128 * freq) / 1000;
+ unsigned int best_n_distance = ideal_n;
+ unsigned int best_n = 0;
+ u64 best_diff = U64_MAX;
+ int n;
+
+ /* If the ideal N could satisfy the audio math, then just take it */
+ if (dw_hdmi_qp_audio_math_diff(freq, ideal_n, pixel_clk) == 0)
+ return ideal_n;
+
+ for (n = min_n; n <= max_n; n++) {
+ u64 diff = dw_hdmi_qp_audio_math_diff(freq, n, pixel_clk);
+
+ if (diff < best_diff ||
+ (diff == best_diff && abs(n - ideal_n) < best_n_distance)) {
+ best_n = n;
+ best_diff = diff;
+ best_n_distance = abs(best_n - ideal_n);
+ }
+
+		/*
+		 * The best N already satisfies the audio math and is the
+		 * closest value to the ideal N, so stop searching.
+		 */
+ if (best_diff == 0 && (abs(n - ideal_n) > best_n_distance))
+ break;
+ }
+
+ return best_n;
+}
+
+static unsigned int dw_hdmi_qp_find_n(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ int n = dw_hdmi_qp_match_tmds_n_table(hdmi, pixel_clk, sample_rate);
+
+ if (n > 0)
+ return n;
+
+ dev_warn(hdmi->dev, "Rate %lu missing; compute N dynamically\n",
+ pixel_clk);
+
+ return dw_hdmi_qp_compute_n(hdmi, pixel_clk, sample_rate);
+}
+
+static unsigned int dw_hdmi_qp_find_cts(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ const struct dw_hdmi_audio_tmds_cts *tmds_cts = NULL;
+ int i;
+
+ for (i = 0; common_tmds_cts_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_cts_table[i].tmds) {
+ tmds_cts = &common_tmds_cts_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_cts)
+ return 0;
+
+ switch (sample_rate) {
+ case 32000:
+ return tmds_cts->cts_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return tmds_cts->cts_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return tmds_cts->cts_48k;
+ default:
+		return 0;
+ }
+}
+
+static void dw_hdmi_qp_set_audio_interface(struct dw_hdmi_qp *hdmi,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ u32 conf0 = 0;
+
+ /* Reset the audio data path of the AVP */
+ dw_hdmi_qp_write(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWINIT_P, GLOBAL_SWRESET_REQUEST);
+
+ /* Disable AUDS, ACR, AUDI */
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDS_TX_EN | PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Clear the audio FIFO */
+ dw_hdmi_qp_write(hdmi, AUDIO_FIFO_CLR_P, AUDIO_INTERFACE_CONTROL0);
+
+ /* Select I2S interface as the audio source */
+ dw_hdmi_qp_mod(hdmi, AUD_IF_I2S, AUD_IF_SEL_MSK, AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable the active i2s lanes */
+ switch (hparms->channels) {
+ case 7 ... 8:
+ conf0 |= I2S_LINES_EN(3);
+ fallthrough;
+ case 5 ... 6:
+ conf0 |= I2S_LINES_EN(2);
+ fallthrough;
+ case 3 ... 4:
+ conf0 |= I2S_LINES_EN(1);
+ fallthrough;
+ default:
+ conf0 |= I2S_LINES_EN(0);
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_LINES_EN_MSK, AUDIO_INTERFACE_CONFIG0);
+
+	/*
+	 * Enable the B/P/C/U/V bits generated internally for L-PCM, or
+	 * received from the stream for NLPCM/HBR.
+	 */
+ switch (fmt->bit_fmt) {
+ case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+ conf0 = (hparms->channels == 8) ? AUD_HBR : AUD_ASP;
+ conf0 |= I2S_BPCUV_RCV_EN;
+ break;
+ default:
+ conf0 = AUD_ASP | I2S_BPCUV_RCV_DIS;
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_BPCUV_RCV_MSK | AUD_FORMAT_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable audio FIFO auto clear when overflow */
+ dw_hdmi_qp_mod(hdmi, AUD_FIFO_INIT_ON_OVF_EN, AUD_FIFO_INIT_ON_OVF_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+}
+
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information for all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for the S/PDIF interface this information comes from the stream).
+ */
+static void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi,
+ u8 *channel_status, bool ref2stream)
+{
+ /*
+ * AUDPKT_CHSTATUS_OVR0: { RSV, RSV, CS1, CS0 }
+ * AUDPKT_CHSTATUS_OVR1: { CS6, CS5, CS4, CS3 }
+ *
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * CS0: | Mode | d | c | b | a |
+ * CS1: | Category Code |
+ * CS2: | Channel Number | Source Number |
+ * CS3: | Clock Accuracy | Sample Freq |
+ * CS4: | Ori Sample Freq | Word Length |
+ * CS5: | | CGMS-A |
+ * CS6~CS23: Reserved
+ *
+ * a: use of channel status block
+ * b: linear PCM identification: 0 for lpcm, 1 for nlpcm
+ * c: copyright information
+ * d: additional format information
+ */
+
+ if (ref2stream)
+ channel_status[0] |= IEC958_AES0_NONAUDIO;
+
+ if ((dw_hdmi_qp_read(hdmi, AUDIO_INTERFACE_CONFIG0) & GENMASK(25, 24)) == AUD_HBR) {
+ /* fixup cs for HBR */
+ channel_status[3] = (channel_status[3] & 0xf0) | IEC958_AES3_CON_FS_768000;
+ channel_status[4] = (channel_status[4] & 0x0f) | IEC958_AES4_CON_ORIGFS_NOTID;
+ }
+
+ dw_hdmi_qp_write(hdmi, channel_status[0] | (channel_status[1] << 8),
+ AUDPKT_CHSTATUS_OVR0);
+
+ regmap_bulk_write(hdmi->regm, AUDPKT_CHSTATUS_OVR1, &channel_status[3], 1);
+
+ if (ref2stream)
+ dw_hdmi_qp_mod(hdmi, 0,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+ else
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+}
+
+static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long long tmds_char_rate,
+ unsigned int sample_rate)
+{
+ unsigned int n, cts;
+
+ n = dw_hdmi_qp_find_n(hdmi, tmds_char_rate, sample_rate);
+ cts = dw_hdmi_qp_find_cts(hdmi, tmds_char_rate, sample_rate);
+
+ dw_hdmi_qp_set_cts_n(hdmi, cts, n);
+}
+
+static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_mod(hdmi, 0, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ bool ref2stream = false;
+
+ if (!hdmi->tmds_char_rate)
+ return -ENODEV;
+
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
+ dev_err(hdmi->dev, "unsupported clock settings\n");
+ return -EINVAL;
+ }
+
+ if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ ref2stream = true;
+
+ dw_hdmi_qp_set_audio_interface(hdmi, fmt, hparms);
+ dw_hdmi_qp_set_sample_rate(hdmi, hdmi->tmds_char_rate, hparms->sample_rate);
+ dw_hdmi_qp_set_channel_status(hdmi, hparms->iec.status, ref2stream);
+ drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, &hparms->cea);
+
+ return 0;
+}
+
+static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
+{
+	/*
+	 * Keep the ACR, AUDI and AUDS packets always on to keep the sink
+	 * device active, for better compatibility and user experience.
+	 *
+	 * This also fixes pop sounds on some sink devices when they wake
+	 * up from suspend.
+	 */
+ dw_hdmi_qp_mod(hdmi, I2S_BPCUV_RCV_DIS, I2S_BPCUV_RCV_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+
+ dw_hdmi_qp_mod(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE,
+ AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+}
+
+static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_audio_disable_regs(hdmi);
+}
+
static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi,
unsigned char *buf, unsigned int length)
{
@@ -361,11 +806,55 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
return 0;
}
+/*
+ * Static values documented in the TRM
+ * Different values are only used for debug purposes
+ */
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa
+
+static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
+ const u8 *buffer, size_t len)
+{
+ /*
+ * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV }
+ * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 }
+ * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 }
+ *
+ * PB0: CheckSum
+ * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 |
+ * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 |
+ * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 |
+ * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 |
+ * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 |
+ * PB6~PB10: Reserved
+ *
+ * AUDI_CONTENTS0 default value defined by HDMI specification,
+ * and shall only be changed for debug purposes.
+ */
+ u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) |
+ (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16);
+
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
+
+ /* Enable ACR, AUDI, AMD */
+ dw_hdmi_qp_mod(hdmi,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Enable AUDS */
+ dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN);
+
+ return 0;
+}
+
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
unsigned int op_mode;
@@ -379,9 +868,11 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
return;
if (connector->display_info.is_hdmi) {
- dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
- __func__, conn_state->hdmi.tmds_char_rate);
+ dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc);
op_mode = 0;
+ hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
} else {
dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__);
op_mode = OPMODE_DVI;
@@ -396,15 +887,17 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
+ hdmi->tmds_char_rate = 0;
+
hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
}
static enum drm_connector_status
-dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
+dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
@@ -455,6 +948,13 @@ static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN |
+ PKTSCHED_AUDS_TX_EN |
+ PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+ break;
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
}
@@ -477,12 +977,188 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
case HDMI_INFOFRAME_TYPE_DRM:
return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len);
+
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
return 0;
}
}
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 stat;
+
+ stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS);
+ if (stat == 0)
+ return IRQ_NONE;
+
+ dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR);
+
+ if (stat & CEC_STAT_LINE_ERR) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CEC_STAT_EOM) {
+ unsigned int len, i, val;
+
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS);
+ len = (val & 0xf) + 1;
+
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < 4; i++) {
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4);
+ cec->rx_msg.msg[i * 4] = val & 0xff;
+ cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff;
+ cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff;
+ cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff;
+ }
+
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ cec->rx_msg.len = len;
+ cec->rx_done = true;
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ drm_connector_hdmi_cec_transmit_attempt_done(cec->connector,
+ cec->tx_status);
+ }
+
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ cec->connector = connector;
+
+ dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+
+ return devm_request_threaded_irq(hdmi->dev, cec->irq,
+ dw_hdmi_qp_cec_hardirq,
+ dw_hdmi_qp_cec_thread, IRQF_SHARED,
+ dev_name(hdmi->dev), hdmi);
+}
+
+static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST;
+
+ dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int irqs;
+ u32 swdisable;
+
+ if (!enable) {
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable = swdisable | CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+ } else {
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable = swdisable & ~CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID);
+
+ irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM |
+ CEC_STAT_DONE;
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int i;
+ u32 val;
+
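+	/*
+	 * Pack the CEC message bytes little-endian into 32-bit words and
+	 * flush each completed (or final partial) word to the TX data
+	 * registers.
+	 */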
+ for (i = 0; i < msg->len; i++) {
+ if (!(i % 4))
+ val = msg->msg[i];
+ if ((i % 4) == 1)
+ val |= msg->msg[i] << 8;
+ if ((i % 4) == 2)
+ val |= msg->msg[i] << 16;
+ if ((i % 4) == 3)
+ val |= msg->msg[i] << 24;
+
+ if (i == (msg->len - 1) || (i % 4) == 3)
+ dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4);
+ }
+
+ dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL);
+
+ return 0;
+}
+#else
+#define dw_hdmi_qp_cec_init NULL
+#define dw_hdmi_qp_cec_enable NULL
+#define dw_hdmi_qp_cec_log_addr NULL
+#define dw_hdmi_qp_cec_transmit NULL
+#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */
+
static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -494,6 +1170,13 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
+ .hdmi_audio_startup = dw_hdmi_qp_audio_enable,
+ .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable,
+ .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare,
+ .hdmi_cec_init = dw_hdmi_qp_cec_init,
+ .hdmi_cec_enable = dw_hdmi_qp_cec_enable,
+ .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr,
+ .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit,
};
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
@@ -529,13 +1212,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N);
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N);
- dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0);
+ dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0);
/* Software reset */
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
-
dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0);
-
dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0);
/* Clear DONE and ERROR interrupts */
@@ -561,9 +1242,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(-ENODEV);
}
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi_qp, bridge,
+ &dw_hdmi_qp_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return ERR_CAST(hdmi);
hdmi->dev = dev;
@@ -580,6 +1262,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->phy.ops = plat_data->phy_ops;
hdmi->phy.data = plat_data->phy_data;
+ if (plat_data->ref_clk_rate) {
+ hdmi->ref_clk_rate = plat_data->ref_clk_rate;
+ } else {
+ hdmi->ref_clk_rate = 428571429;
+ dev_warn(dev, "Set ref_clk_rate to vendor default\n");
+ }
+
dw_hdmi_qp_init_hw(hdmi);
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
@@ -589,20 +1278,46 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(ret);
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
hdmi->bridge.vendor = "Synopsys";
hdmi->bridge.product = "DW HDMI QP TX";
+ if (plat_data->supported_formats)
+ hdmi->bridge.supported_formats = plat_data->supported_formats;
+
+ if (plat_data->max_bpc)
+ hdmi->bridge.max_bpc = plat_data->max_bpc;
+
hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi);
if (IS_ERR(hdmi->bridge.ddc))
return ERR_CAST(hdmi->bridge.ddc);
+ hdmi->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ hdmi->bridge.hdmi_audio_dev = dev;
+ hdmi->bridge.hdmi_audio_dai_port = 1;
+
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ if (plat_data->cec_irq) {
+ hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ hdmi->bridge.hdmi_cec_dev = dev;
+ hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev);
+
+ hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL);
+ if (!hdmi->cec)
+ return ERR_PTR(-ENOMEM);
+
+ hdmi->cec->irq = plat_data->cec_irq;
+ } else {
+ dev_warn(dev, "Disabled CEC support due to missing IRQ\n");
+ }
+#endif
+
ret = devm_drm_bridge_add(dev, &hdmi->bridge);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
index 72987e6c4689..91a15f82e32a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -488,9 +488,23 @@
#define AUDPKT_VBIT_OVR0 0xf24
/* CEC Registers */
#define CEC_TX_CONTROL 0x1000
+#define CEC_CTRL_CLEAR BIT(0)
+#define CEC_CTRL_START BIT(0)
#define CEC_STATUS 0x1004
+#define CEC_STAT_DONE BIT(0)
+#define CEC_STAT_NACK BIT(1)
+#define CEC_STAT_ARBLOST BIT(2)
+#define CEC_STAT_LINE_ERR BIT(3)
+#define CEC_STAT_RETRANS_FAIL BIT(4)
+#define CEC_STAT_DISCARD BIT(5)
+#define CEC_STAT_TX_BUSY BIT(8)
+#define CEC_STAT_RX_BUSY BIT(9)
+#define CEC_STAT_DRIVE_ERR BIT(10)
+#define CEC_STAT_EOM BIT(11)
+#define CEC_STAT_NOTIFY_ERR BIT(12)
#define CEC_CONFIG 0x1008
#define CEC_ADDR 0x100c
+#define CEC_ADDR_BROADCAST BIT(15)
#define CEC_TX_COUNT 0x1020
#define CEC_TX_DATA3_0 0x1024
#define CEC_TX_DATA7_4 0x1028
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 996733ed2c00..3b77e73ac0ea 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
@@ -22,8 +23,8 @@
#include <media/cec-notifier.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -176,6 +177,7 @@ struct dw_hdmi {
spinlock_t audio_lock;
struct mutex audio_mutex;
+ unsigned int sample_iec958;
unsigned int sample_non_pcm;
unsigned int sample_width;
unsigned int sample_rate;
@@ -197,6 +199,12 @@ struct dw_hdmi {
enum drm_connector_status last_connector_result;
};
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi)
+{
+ return hdmi->plat_data;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data);
+
#define HDMI_IH_PHY_STAT0_RX_SENSE \
(HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
@@ -711,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_iec958 = iec958;
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
mutex_lock(&hdmi->audio_mutex);
@@ -842,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
hdmi->channels,
hdmi->sample_width,
hdmi->sample_rate,
- hdmi->sample_non_pcm);
+ hdmi->sample_non_pcm,
+ hdmi->sample_iec958);
}
static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
@@ -2621,6 +2638,7 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
* - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
* - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
* - MEDIA_BUS_FMT_YUV16_1X48,
* - MEDIA_BUS_FMT_RGB161616_1X48,
* - MEDIA_BUS_FMT_UYVY12_1X24,
@@ -2631,7 +2649,6 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_RGB101010_1X30,
* - MEDIA_BUS_FMT_UYVY8_1X16,
* - MEDIA_BUS_FMT_YUV8_1X24,
- * - MEDIA_BUS_FMT_RGB888_1X24,
*/
/* Can return a maximum of 11 possible output formats for a mode/connector */
@@ -2669,7 +2686,7 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
}
/*
- * If the current mode enforces 4:2:0, force the output but format
+ * If the current mode enforces 4:2:0, force the output bus format
* to 4:2:0 and do not add the YUV422/444/RGB formats
*/
if (conn->ycbcr_420_allowed &&
@@ -2889,12 +2906,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
@@ -2945,7 +2963,7 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -2959,10 +2977,9 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector *connector;
connector = drm_atomic_get_new_connector_for_encoder(state,
@@ -2977,7 +2994,8 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->mutex);
}
-static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+dw_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -3333,9 +3351,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
u8 config0;
u8 config3;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
hdmi->plat_data = plat_data;
hdmi->dev = dev;
@@ -3495,7 +3513,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
}
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 0fb02e4e7f4e..8fc2e282ff11 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
@@ -934,7 +935,7 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
@@ -1022,7 +1023,7 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -1043,7 +1044,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -1072,15 +1073,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
@@ -1193,9 +1195,10 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct dw_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return ERR_PTR(-ENOMEM);
+ dsi = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi, bridge,
+ &dw_mipi_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return ERR_CAST(dsi);
dsi->dev = dev;
dsi->plat_data = plat_data;
@@ -1264,7 +1267,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
dsi->bridge.of_node = pdev->dev.of_node;
return dsi;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index d7569bf2d9c3..5926a3a05d79 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -745,7 +746,7 @@ static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
@@ -821,7 +822,7 @@ static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
}
static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
@@ -840,7 +841,7 @@ static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
@@ -870,15 +871,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
@@ -913,9 +915,10 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
struct dw_mipi_dsi2 *dsi2;
int ret;
- dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
- if (!dsi2)
- return ERR_PTR(-ENOMEM);
+ dsi2 = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi2, bridge,
+ &dw_mipi_dsi2_bridge_funcs);
+ if (IS_ERR(dsi2))
+ return ERR_CAST(dsi2);
dsi2->dev = dev;
dsi2->plat_data = plat_data;
@@ -980,7 +983,6 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
}
dsi2->bridge.driver_private = dsi2;
- dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
dsi2->bridge.of_node = pdev->dev.of_node;
return dsi2;
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 46198af9eebb..98df3e667d4a 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -20,10 +20,10 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -149,7 +149,8 @@ static int tc358762_init(struct tc358762 *ctx)
return tc358762_clear_error(ctx);
}
-static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -171,7 +172,8 @@ static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_s
dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
}
-static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -188,7 +190,8 @@ static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_sta
ctx->pre_enabled = true;
}
-static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -199,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
@@ -261,9 +265,10 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
struct tc358762 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358762, bridge,
+ &tc358762_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -284,7 +289,6 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..084e9d898e22 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
@@ -346,9 +347,10 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
struct tc358764 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358764, bridge,
+ &tc358764_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -367,7 +369,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 4637bf6ea7a3..4097fef4b86b 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -344,6 +344,14 @@
#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
+enum tc_mode {
+ mode_dpi_to_edp = BIT(1) | BIT(2),
+ mode_dpi_to_dp = BIT(1),
+ mode_dsi_to_edp = BIT(0) | BIT(2),
+ mode_dsi_to_dp = BIT(0),
+ mode_dsi_to_dpi = BIT(0) | BIT(1),
+};
+
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
@@ -1548,9 +1556,8 @@ static int tc_edp_stream_disable(struct tc_data *tc)
return 0;
}
-static void
-tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1564,9 +1571,8 @@ tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1576,9 +1582,8 @@ tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
}
-static void
-tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1603,9 +1608,8 @@ tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1756,7 +1760,8 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.get_modes = tc_connector_get_modes,
};
-static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
bool conn;
@@ -1781,7 +1786,7 @@ tc_connector_detect(struct drm_connector *connector, bool force)
struct tc_data *tc = connector_to_tc(connector);
if (tc->hpd_pin >= 0)
- return tc_bridge_detect(&tc->bridge);
+ return tc_bridge_detect(&tc->bridge, connector);
if (tc->panel_bridge)
return connector_status_connected;
@@ -1799,6 +1804,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1811,6 +1817,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
@@ -2329,7 +2336,6 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
if (bridge) {
tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
- tc->bridge.funcs = &tc_dpi_bridge_funcs;
return 0;
}
@@ -2362,7 +2368,6 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
- tc->bridge.funcs = &tc_edp_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2370,17 +2375,11 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
return 0;
}
-static int tc_probe_bridge_endpoint(struct tc_data *tc)
+static enum tc_mode tc_probe_get_mode(struct device *dev)
{
- struct device *dev = tc->dev;
struct of_endpoint endpoint;
struct device_node *node = NULL;
- const u8 mode_dpi_to_edp = BIT(1) | BIT(2);
- const u8 mode_dpi_to_dp = BIT(1);
- const u8 mode_dsi_to_edp = BIT(0) | BIT(2);
- const u8 mode_dsi_to_dp = BIT(0);
- const u8 mode_dsi_to_dpi = BIT(0) | BIT(1);
- u8 mode = 0;
+ enum tc_mode mode = 0;
/*
* Determine bridge configuration.
@@ -2403,7 +2402,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+ }
+
+ if (mode != mode_dpi_to_edp &&
+ mode != mode_dpi_to_dp &&
+ mode != mode_dsi_to_dpi &&
+ mode != mode_dsi_to_edp &&
+ mode != mode_dsi_to_dp) {
+ dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
+ return -EINVAL;
+ }
+
+ return mode;
+}
+static int tc_probe_bridge_endpoint(struct tc_data *tc, enum tc_mode mode)
+{
+ struct device *dev = tc->dev;
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(dev->of_node, node) {
+ of_graph_parse_endpoint(node, &endpoint);
if (endpoint.port == 2) {
of_property_read_u8_array(node, "toshiba,pre-emphasis",
tc->pre_emphasis,
@@ -2429,24 +2449,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return tc_probe_edp_bridge_endpoint(tc);
}
- dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
-
+ /* Should never happen, mode was validated by tc_probe_get_mode() */
return -EINVAL;
}
static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ const struct drm_bridge_funcs *funcs;
struct tc_data *tc;
+ int mode;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ mode = tc_probe_get_mode(dev);
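+ /* DSI-to-DPI uses the DPI bridge ops; all other modes use the (e)DP ops */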
+ funcs = (mode == mode_dsi_to_dpi) ? &tc_dpi_bridge_funcs : &tc_edp_bridge_funcs;
+
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge, funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
- ret = tc_probe_bridge_endpoint(tc);
+ ret = tc_probe_bridge_endpoint(tc, mode);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..fbdc44e16229 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -1282,9 +1287,10 @@ static int tc358768_i2c_probe(struct i2c_client *client)
if (!np)
return -ENODEV;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tc358768_priv, bridge,
+ &tc358768_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
priv->dev = dev;
@@ -1316,7 +1322,6 @@ static int tc358768_i2c_probe(struct i2c_client *client)
priv->dsi_host.dev = dev;
priv->dsi_host.ops = &tc358768_dsi_host_ops;
- priv->bridge.funcs = &tc358768_bridge_funcs;
priv->bridge.timings = &default_tc358768_timings;
priv->bridge.of_node = np;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 0b4efaca6d68..366b12db0e7c 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -26,7 +26,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#define FLD_VAL(val, start, end) FIELD_PREP(GENMASK(start, end), val)
@@ -287,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -310,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -369,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -590,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
@@ -663,9 +659,10 @@ static int tc_probe(struct i2c_client *client)
struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge,
+ &tc_bridge_funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
tc->i2c = client;
@@ -705,7 +702,6 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 82d4a4e206a5..e636459d9185 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -21,10 +21,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
+#include <dt-bindings/display/tda998x.h>
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
enum {
@@ -750,7 +751,8 @@ tda998x_reset(struct tda998x_priv *priv)
*/
static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
+ struct tda998x_priv *priv = timer_container_of(priv, t,
+ edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1364,6 +1366,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
@@ -1717,10 +1720,10 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
switch (afmt) {
- case AFMT_I2S:
+ case TDA998x_I2S:
route = AUDIO_ROUTE_I2S;
break;
- case AFMT_SPDIF:
+ case TDA998x_SPDIF:
route = AUDIO_ROUTE_SPDIF;
break;
default:
@@ -1746,44 +1749,6 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- if (p->audio_params.format != AFMT_UNUSED) {
- unsigned int ratio, route;
- bool spdif = p->audio_params.format == AFMT_SPDIF;
-
- route = AUDIO_ROUTE_I2S + spdif;
-
- priv->audio.route = &tda998x_audio_route[route];
- priv->audio.cea = p->audio_params.cea;
- priv->audio.sample_rate = p->audio_params.sample_rate;
- memcpy(priv->audio.status, p->audio_params.status,
- min(sizeof(priv->audio.status),
- sizeof(p->audio_params.status)));
- priv->audio.ena_ap = p->audio_params.config;
- priv->audio.i2s_format = I2S_FORMAT_PHILIPS;
-
- ratio = spdif ? 64 : p->audio_params.sample_width * 2;
- return tda998x_derive_cts_n(priv, &priv->audio, ratio);
- }
-
- return 0;
-}
-
static void tda998x_destroy(struct device *dev)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1800,7 +1765,7 @@ static void tda998x_destroy(struct device *dev)
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
- del_timer_sync(&priv->edid_delay_timer);
+ timer_delete_sync(&priv->edid_delay_timer);
cancel_work_sync(&priv->detect_work);
i2c_unregister_device(priv->cec);
@@ -1817,9 +1782,9 @@ static int tda998x_create(struct device *dev)
u32 video;
int rev_lo, rev_hi, ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -1982,13 +1947,8 @@ static int tda998x_create(struct device *dev)
if (priv->audio_port_enable[AUDIO_ROUTE_I2S] ||
priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
tda998x_audio_codec_init(priv, &client->dev);
- } else if (dev->platform_data) {
- ret = tda998x_set_config(priv, dev->platform_data);
- if (ret)
- goto fail;
}
- priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
priv->bridge.of_node = dev->of_node;
#endif
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..2cb7cd0c0608 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
@@ -180,9 +181,10 @@ static int thc63_probe(struct platform_device *pdev)
struct thc63_dev *thc63;
int ret;
- thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
- if (!thc63)
- return -ENOMEM;
+ thc63 = devm_drm_bridge_alloc(&pdev->dev, struct thc63_dev, bridge,
+ &thc63_bridge_func);
+ if (IS_ERR(thc63))
+ return PTR_ERR(thc63);
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
@@ -207,7 +209,6 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
- thc63->bridge.funcs = &thc63_bridge_func;
thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index eaec70fa42b6..b07f7c9d5890 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -105,7 +105,7 @@ static const struct regmap_config dlpc_regmap_config = {
};
static void dlpc_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
struct device *dev = dlpc->dev;
@@ -170,7 +170,7 @@ static void dlpc_atomic_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
int ret;
@@ -193,7 +193,7 @@ static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
@@ -348,9 +348,10 @@ static int dlpc3433_probe(struct i2c_client *client)
struct dlpc *dlpc;
int ret;
- dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
- if (!dlpc)
- return -ENOMEM;
+ dlpc = devm_drm_bridge_alloc(dev, struct dlpc, bridge,
+ &dlpc_bridge_funcs);
+ if (IS_ERR(dlpc))
+ return PTR_ERR(dlpc);
dlpc->dev = dev;
@@ -365,7 +366,6 @@ static int dlpc3433_probe(struct i2c_client *client)
dev_set_drvdata(dev, dlpc);
i2c_set_clientdata(client, dlpc);
- dlpc->bridge.funcs = &dlpc_bridge_funcs;
dlpc->bridge.of_node = dev->of_node;
drm_bridge_add(&dlpc->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 336380114eea..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -35,12 +35,14 @@
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -159,6 +161,9 @@ struct sn65dsi83 {
bool lvds_dual_link_even_odd_swap;
int lvds_vod_swing_conf[2];
int lvds_term_conf[2];
+ int irq;
+ struct delayed_work monitor_work;
+ struct work_struct reset_work;
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -285,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -363,11 +369,104 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
return dsi_div - 1;
}
+static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ int err;
+
+ /*
+ * Reset active outputs of the related CRTC.
+ *
+ * This way, the drm core will reconfigure each component in the CRTC
+ * output path. In our case, this forces the previous component back
+ * into LP11 mode and so allows the SN65DSI83 bridge to be
+ * reconfigured.
+ *
+ * Keep the lock across the whole operation so that it is atomic.
+ */
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ dev_warn(sn65dsi83->dev, "reset the pipe\n");
+
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return 0;
+}
+
+static void sn65dsi83_reset_work(struct work_struct *ws)
+{
+ struct sn65dsi83 *ctx = container_of(ws, struct sn65dsi83, reset_work);
+ int ret;
+
+ /* Reset the pipe */
+ ret = sn65dsi83_reset_pipe(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "reset pipe failed %pe\n", ERR_PTR(ret));
+ return;
+ }
+ if (ctx->irq)
+ enable_irq(ctx->irq);
+}
+
+static void sn65dsi83_handle_errors(struct sn65dsi83 *ctx)
+{
+ unsigned int irq_stat;
+ int ret;
+
+ /*
+ * Schedule a reset if:
+ * - the bridge doesn't answer, or
+ * - the bridge signals an error
+ */
+
+ ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat);
+ if (ret || irq_stat) {
+ /*
+ * Acknowledging the IRQ is not always possible (the bridge can be
+ * in a state where it doesn't answer anymore). To prevent an
+ * interrupt storm, disable the interrupt. It will be re-enabled
+ * after the reset.
+ */
+ if (ctx->irq)
+ disable_irq_nosync(ctx->irq);
+
+ schedule_work(&ctx->reset_work);
+ }
+}
+
+static void sn65dsi83_monitor_work(struct work_struct *work)
+{
+ struct sn65dsi83 *ctx = container_of(to_delayed_work(work),
+ struct sn65dsi83, monitor_work);
+
+ sn65dsi83_handle_errors(ctx);
+
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_start(struct sn65dsi83 *ctx)
+{
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_stop(struct sn65dsi83 *ctx)
+{
+ cancel_delayed_work_sync(&ctx->monitor_work);
+}
+
static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -457,6 +556,8 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
(mode->flags & DRM_MODE_FLAG_NVSYNC ?
REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+ val |= bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW ?
+ REG_LVDS_FMT_DE_NEG_POLARITY : 0;
/* Set up bits-per-pixel, 18bpp or 24bpp. */
if (lvds_format_24bpp) {
@@ -535,7 +636,7 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
}
static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
unsigned int pval;
@@ -549,14 +650,32 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
if (pval)
dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
+
+ if (ctx->irq) {
+ /* Enable irq to detect errors */
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN);
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0xff);
+ } else {
+ /* Use the polling task */
+ sn65dsi83_monitor_start(ctx);
+ }
}
static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
int ret;
+ if (ctx->irq) {
+ /* Disable irq */
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0x0);
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, 0x0);
+ } else {
+ /* Stop the polling task */
+ sn65dsi83_monitor_stop(ctx);
+ }
+
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
gpiod_set_value_cansleep(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
@@ -806,6 +925,14 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
+static irqreturn_t sn65dsi83_irq(int irq, void *data)
+{
+ struct sn65dsi83 *ctx = data;
+
+ sn65dsi83_handle_errors(ctx);
+ return IRQ_HANDLED;
+}
+
static int sn65dsi83_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -814,11 +941,13 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
+ INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
+ INIT_DELAYED_WORK(&ctx->monitor_work, sn65dsi83_monitor_work);
if (dev->of_node) {
model = (enum sn65dsi83_model)(uintptr_t)
@@ -843,12 +972,20 @@ static int sn65dsi83_probe(struct i2c_client *client)
if (IS_ERR(ctx->regmap))
return dev_err_probe(dev, PTR_ERR(ctx->regmap), "failed to get regmap\n");
+ if (client->irq) {
+ ctx->irq = client->irq;
+ ret = devm_request_threaded_irq(ctx->dev, ctx->irq, NULL, sn65dsi83_irq,
+ IRQF_ONESHOT, dev_name(ctx->dev), ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+ }
+
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
drm_bridge_add(&ctx->bridge);
ret = sn65dsi83_host_attach(ctx);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index e4d9006b59f1..276d05d25ad8 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -32,10 +32,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -106,10 +106,21 @@
#define SN_PWM_EN_INV_REG 0xA5
#define SN_PWM_INV_MASK BIT(0)
#define SN_PWM_EN_MASK BIT(1)
+
+#define SN_IRQ_EN_REG 0xE0
+#define IRQ_EN BIT(0)
+
+#define SN_IRQ_EVENTS_EN_REG 0xE6
+#define HPD_INSERTION_EN BIT(1)
+#define HPD_REMOVAL_EN BIT(2)
+
#define SN_AUX_CMD_STATUS_REG 0xF4
#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+#define SN_IRQ_STATUS_REG 0xF5
+#define HPD_REMOVAL_STATUS BIT(2)
+#define HPD_INSERTION_STATUS BIT(1)
#define MIN_DSI_CLK_FREQ_MHZ 40
@@ -152,7 +163,9 @@
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
* @comms_enabled: If true then communication over the aux channel is enabled.
+ * @hpd_enabled: If true then HPD events are enabled.
* @comms_mutex: Protects modification of comms_enabled.
+ * @hpd_mutex: Protects modification of hpd_enabled.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -190,13 +203,15 @@ struct ti_sn65dsi86 {
u8 ln_assign;
u8 ln_polrs;
bool comms_enabled;
+ bool hpd_enabled;
struct mutex comms_mutex;
+ struct mutex hpd_mutex;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
struct pwm_chip *pchip;
bool pwm_enabled;
atomic_t pwm_pin_busy;
@@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.max_register = 0xFF,
};
+static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg,
+ u8 *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(pdata->regmap, reg, &reg_val);
+ if (ret) {
+ dev_err(pdata->dev, "fail to read raw reg %#x: %d\n",
+ reg, ret);
+ return ret;
+ }
+ *val = (u8)reg_val;
+
+ return 0;
+}
+
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 *val)
{
@@ -244,11 +276,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
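+/* Fetch the new adjusted mode for the bridge's encoder from the atomic state */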
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -275,7 +322,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -288,7 +336,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -312,12 +360,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -331,12 +380,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
* 200 ms. We'll assume that the panel driver will have the hardcoded
* delay in its prepare and always disable HPD.
*
- * If HPD somehow makes sense on some future panel we'll have to
- * change this to be conditional on someone specifying that HPD should
- * be used.
+ * For the DisplayPort bridge type we need HPD, so we use the bridge
+ * type to conditionally disable HPD.
+ * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
+ * can be called before that. So for DisplayPort, HPD will only be enabled
+ * once the bridge type is set. We use the bridge type instead of the
+ * "no-hpd" property because the latter is not used consistently in
+ * devicetree descriptions and hence is unreliable.
*/
- regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
- HPD_DISABLE);
+
+ if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
pdata->comms_enabled = true;
@@ -356,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
int ret;
ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -370,6 +426,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
gpiod_set_value_cansleep(pdata->enable_gpio, 1);
/*
+ * After EN is deasserted and an external clock is detected, the bridge
+ * will sample GPIO3:1 to determine its frequency. The driver will
+ * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is
+ * racy. Thus we have to wait a couple of us. According to the datasheet,
+ * the GPIO lines have to be stable for at least 5 us (td5) but it seems that
+ * is not enough and the refclk frequency value is still lost or
+ * overwritten by the bridge itself. Waiting for 20us seems to work.
+ */
+ usleep_range(20, 30);
+
+ /*
* If we have a reference clock we can enable communication w/ the
* panel (including the aux channel) w/out any need for an input clock
* so we can do it in resume which lets us read the EDID before
@@ -377,7 +444,14 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
+
+ if (client->irq) {
+ ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN,
+ IRQ_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret);
+ }
return ret;
}
@@ -424,90 +498,29 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
-static void ti_sn65dsi86_uninit_aux(void *data)
-{
- auxiliary_device_uninit(data);
-}
-
-static void ti_sn65dsi86_delete_aux(void *data)
-{
- auxiliary_device_delete(data);
-}
-
-static void ti_sn65dsi86_aux_device_release(struct device *dev)
-{
- struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
-
- kfree(aux);
-}
-
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device **aux_out,
const char *name)
{
struct device *dev = pdata->dev;
+ const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
- int ret;
+ int id;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ id = (client->adapter->nr << 10) | client->addr;
+ aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
+ NULL, id);
if (!aux)
- return -ENOMEM;
-
- aux->name = name;
- aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_aux_device_release;
- device_set_of_node_from_dev(&aux->dev, dev);
- ret = auxiliary_device_init(aux);
- if (ret) {
- kfree(aux);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
- if (ret)
- return ret;
-
- ret = auxiliary_device_add(aux);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
- if (!ret)
- *aux_out = aux;
+ return -ENODEV;
- return ret;
+ *aux_out = aux;
+ return 0;
}
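As a worked illustration of the id computed above (bus number and address are made-up example values, not from the patch): an SN65DSI86 at I2C address 0x2d on bus 2 gets id (2 << 10) | 0x2d = 0x082d, so multiple bridges on different buses or addresses register distinct auxiliary devices.

/* Illustrative sketch only: how the auxiliary-device id above is formed. */
static u32 example_sn65dsi86_aux_id(unsigned int bus_nr, unsigned short addr)
{
	return (bus_nr << 10) | addr;	/* e.g. (2 << 10) | 0x2d == 0x082d */
}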
/* -----------------------------------------------------------------------------
@@ -731,6 +744,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -747,7 +761,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -812,7 +826,7 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -820,12 +834,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
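The get_new_adjusted_display_mode() helper used here and in the hunks below is introduced earlier in this patch and is not visible in this excerpt. A hedged sketch of a plausible shape, built only from DRM atomic accessors already used elsewhere in this file (the real implementation may differ):

/*
 * Hedged sketch, not the patch's actual implementation: look up the new
 * adjusted mode for the CRTC feeding this bridge from the atomic state.
 */
static struct drm_display_mode *
get_new_adjusted_display_mode(struct drm_bridge *bridge,
			      struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	return &crtc_state->adjusted_mode;
}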
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -855,12 +870,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -959,10 +976,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1072,7 +1090,7 @@ exit:
}
static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
struct drm_connector *connector;
@@ -1084,7 +1102,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
int max_dp_lanes;
unsigned int bpp;
- connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector) {
dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
@@ -1104,7 +1122,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1139,7 +1157,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1155,7 +1173,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1163,21 +1181,21 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
}
static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1194,14 +1212,20 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
-static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
- pm_runtime_get_sync(pdata->dev);
+ /*
+	 * The runtime PM reference is grabbed in ti_sn_bridge_hpd_enable(),
+	 * as the chip won't report HPD right after being powered on.
+	 * HPD_DEBOUNCED_STATE reflects the correct state only after the
+	 * debounce time (~100-400 ms) has elapsed.
+ */
+
regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
- pm_runtime_put_autosuspend(pdata->dev);
return val & HPD_DEBOUNCED_STATE ? connector_status_connected
: connector_status_disconnected;
@@ -1215,6 +1239,61 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
+static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
+
+ /*
+	 * The device needs to be powered on before the HPD state is read, so
+	 * that detection in ti_sn_bridge_detect() is reliable despite the
+	 * long debounce time.
+ */
+
+ pm_runtime_get_sync(pdata->dev);
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = true;
+ mutex_unlock(&pdata->hpd_mutex);
+
+ if (client->irq) {
+ ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret);
+ }
+}
+
+static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
+
+ if (client->irq) {
+ ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret);
+ }
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = false;
+ mutex_unlock(&pdata->hpd_mutex);
+
+ pm_runtime_put_autosuspend(pdata->dev);
+}
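A hedged sketch of the intended pairing, with a purely hypothetical in-driver caller (in practice these hooks are invoked by DRM core helpers, not by driver code): detect() is only meaningful between hpd_enable() and hpd_disable(), since hpd_enable() now holds the runtime PM reference and the debounced HPD state needs time to settle.

/* Hypothetical illustration only, not code from the patch. */
static enum drm_connector_status
example_hpd_poll(struct drm_bridge *bridge, struct drm_connector *connector)
{
	enum drm_connector_status status;

	ti_sn_bridge_hpd_enable(bridge);	/* powers the chip, unmasks HPD IRQs */
	msleep(400);				/* upper end of the quoted debounce time */
	status = ti_sn_bridge_detect(bridge, connector);
	ti_sn_bridge_hpd_disable(bridge);	/* masks IRQs, drops the PM reference */

	return status;
}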
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1228,6 +1307,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
+ .hpd_enable = ti_sn_bridge_hpd_enable,
+ .hpd_disable = ti_sn_bridge_hpd_disable,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1293,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
return 0;
}
+static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private)
+{
+ struct ti_sn65dsi86 *pdata = private;
+ struct drm_device *dev = pdata->bridge.dev;
+ u8 status;
+ int ret;
+ bool hpd_event;
+
+ ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS);
+
+ dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status);
+ if (!status)
+ return IRQ_NONE;
+
+ ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+	/* Only send the HPD event if we are bound to a DRM device. */
+ mutex_lock(&pdata->hpd_mutex);
+ if (pdata->hpd_enabled && hpd_event)
+ drm_kms_helper_hotplug_event(dev);
+ mutex_unlock(&pdata->hpd_mutex);
+
+ return IRQ_HANDLED;
+}
+
static int ti_sn_bridge_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -1311,13 +1428,30 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
if (ret)
return ret;
- pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
- if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
- pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HPD;
+ /*
+ * If comms were already enabled they would have been enabled
+ * with the wrong value of HPD_DISABLE. Update it now. Comms
+		 * could already be enabled if someone is holding a pm_runtime
+		 * reference (for instance if a GPIO is in use). Note that in
+		 * most cases nobody does AUX channel transfers before the
+		 * bridge is added, so HPD doesn't _really_ matter then. The
+		 * only exception is the eDP case, where the panel wants to
+		 * read the EDID before the bridge is added; for eDP we
+		 * consistently keep HPD disabled.
+ */
+ mutex_lock(&pdata->comms_mutex);
+ if (pdata->comms_enabled)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
+ HPD_DISABLE, 0);
+ mutex_unlock(&pdata->comms_mutex);
+ }
drm_bridge_add(&pdata->bridge);
@@ -1361,7 +1495,7 @@ static struct auxiliary_driver ti_sn_bridge_driver = {
/* -----------------------------------------------------------------------------
* PWM Controller
*/
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata)
{
return atomic_xchg(&pdata->pwm_pin_busy, 1) ? -EBUSY : 0;
@@ -1702,24 +1836,15 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
}
-static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
- int ret;
-
- if (!test_bit(offset, pdata->gchip_output)) {
- dev_err(pdata->dev, "Ignoring GPIO set while input\n");
- return;
- }
val &= 1;
- ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
- BIT(SN_GPIO_OUTPUT_SHIFT + offset),
- val << (SN_GPIO_OUTPUT_SHIFT + offset));
- if (ret)
- dev_warn(pdata->dev,
- "Failed to set bridge GPIO %u: %d\n", offset, ret);
+ return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
}
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
@@ -1893,6 +2018,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1900,12 +2026,13 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return -ENODEV;
}
- pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
+ mutex_init(&pdata->hpd_mutex);
mutex_init(&pdata->comms_mutex);
pdata->regmap = devm_regmap_init_i2c(client,
@@ -1936,7 +2063,25 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards */
+ if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
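Worked note on the check above: the ID registers hold the part name byte-reversed, so "68ISD" read backwards spells "DSI86". A hedged, illustration-only sketch of dumping the ID the right way round:

/* Illustration only, not part of the patch. */
static void example_print_device_id(struct device *dev, const u8 *id_buf,
				    size_t len)
{
	char name[9] = {};
	size_t i;

	for (i = 0; i < len && i < sizeof(name) - 1; i++)
		name[i] = id_buf[len - 1 - i];	/* reverse the stored bytes */

	dev_dbg(dev, "device id: \"%s\"\n", name);	/* e.g. "DSI86" plus padding */
}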
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL,
+ ti_sn_bridge_interrupt,
+ IRQF_ONESHOT,
+ dev_name(pdata->dev), pdata);
+
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+ }
/*
* Break ourselves up into a collection of aux devices. The only real
@@ -1955,7 +2100,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ret;
}
- if (IS_ENABLED(CONFIG_PWM)) {
+ if (IS_REACHABLE(CONFIG_PWM)) {
ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 3472ed5924e8..27053d020df7 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -18,7 +18,8 @@ struct tdp158 {
struct device *dev;
};
-static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
int err;
struct tdp158 *tdp158 = bridge->driver_private;
@@ -34,7 +35,8 @@ static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *pr
gpiod_set_value_cansleep(tdp158->enable, 1);
}
-static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tdp158 *tdp158 = bridge->driver_private;
@@ -43,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *p
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
@@ -64,9 +68,10 @@ static int tdp158_probe(struct i2c_client *client)
struct tdp158 *tdp158;
struct device *dev = &client->dev;
- tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
- if (!tdp158)
- return -ENOMEM;
+ tdp158 = devm_drm_bridge_alloc(dev, struct tdp158, bridge,
+ &tdp158_bridge_funcs);
+ if (IS_ERR(tdp158))
+ return PTR_ERR(tdp158);
tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(tdp158->next))
@@ -85,7 +90,6 @@ static int tdp158_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
tdp158->bridge.of_node = dev->of_node;
- tdp158->bridge.funcs = &tdp158_bridge_funcs;
tdp158->bridge.driver_private = tdp158;
tdp158->dev = dev;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..b80ee089f880 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -89,7 +89,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- return drm_bridge_detect(dvi->next_bridge);
+ return drm_bridge_detect(dvi->next_bridge, connector);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
@@ -340,14 +341,14 @@ static int tfp410_init(struct device *dev, bool i2c)
return -ENXIO;
}
- dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
- if (!dvi)
- return -ENOMEM;
+ dvi = devm_drm_bridge_alloc(dev, struct tfp410, bridge,
+ &tfp410_bridge_funcs);
+ if (IS_ERR(dvi))
+ return PTR_ERR(dvi);
dvi->dev = dev;
dev_set_drvdata(dev, dvi);
- dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..dcf686c4e73d 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
@@ -76,6 +77,12 @@ static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
return connector_status_disconnected;
}
+static enum drm_connector_status
+tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return tpd12s015_detect(bridge);
+}
+
static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -93,7 +100,7 @@ static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
.attach = tpd12s015_attach,
.detach = tpd12s015_detach,
- .detect = tpd12s015_detect,
+ .detect = tpd12s015_bridge_detect,
.hpd_enable = tpd12s015_hpd_enable,
.hpd_disable = tpd12s015_hpd_disable,
};
@@ -115,13 +122,13 @@ static int tpd12s015_probe(struct platform_device *pdev)
struct gpio_desc *gpio;
int ret;
- tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
- if (!tpd)
- return -ENOMEM;
+ tpd = devm_drm_bridge_alloc(&pdev->dev, struct tpd12s015_device,
+ bridge, &tpd12s015_bridge_funcs);
+ if (IS_ERR(tpd))
+ return PTR_ERR(tpd);
platform_set_drvdata(pdev, tpd);
- tpd->bridge.funcs = &tpd12s015_bridge_funcs;
tpd->bridge.of_node = pdev->dev.of_node;
tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c
new file mode 100644
index 000000000000..43f4e7412d72
--- /dev/null
+++ b/drivers/gpu/drm/bridge/waveshare-dsi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ * Based on panel-raspberrypi-touchscreen by Broadcom
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+struct ws_bridge {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct backlight_device *backlight;
+ struct device *dev;
+ struct regmap *reg_map;
+};
+
+static const struct regmap_config ws_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static struct ws_bridge *bridge_to_ws_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ws_bridge, bridge);
+}
+
+static int ws_bridge_attach_dsi(struct ws_bridge *ws)
+{
+ const struct mipi_dsi_device_info info = {
+ .type = "ws-bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+ struct device_node *dsi_host_node;
+ struct device *dev = ws->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ dsi_host_node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!dsi_host_node) {
+ dev_err(dev, "Failed to get remote port\n");
+ return -ENODEV;
+ }
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find dsi_host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi), "Failed to create dsi device\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 2;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to attach dsi to host\n");
+
+ return 0;
+}
+
+static int ws_bridge_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+ int ret;
+
+ ret = ws_bridge_attach_dsi(ws);
+ if (ret)
+ return ret;
+
+ return drm_bridge_attach(encoder, ws->next_bridge,
+ &ws->bridge, flags);
+}
+
+static void ws_bridge_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ regmap_write(ws->reg_map, 0xad, 0x01);
+ backlight_enable(ws->backlight);
+}
+
+static void ws_bridge_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ backlight_disable(ws->backlight);
+ regmap_write(ws->reg_map, 0xad, 0x00);
+}
+
+static const struct drm_bridge_funcs ws_bridge_bridge_funcs = {
+ .enable = ws_bridge_bridge_enable,
+ .disable = ws_bridge_bridge_disable,
+ .attach = ws_bridge_bridge_attach,
+};
+
+static int ws_bridge_bl_update_status(struct backlight_device *bl)
+{
+ struct ws_bridge *ws = bl_get_data(bl);
+
+ regmap_write(ws->reg_map, 0xab, 0xff - backlight_get_brightness(bl));
+ regmap_write(ws->reg_map, 0xaa, 0x01);
+
+ return 0;
+}
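A brief worked note on the writes above (register meanings are inferred from this driver alone, not from a published datasheet): the brightness level appears to be programmed inverted into register 0xab, with the write to 0xaa acting as a latch, so full brightness writes 0x00 and zero brightness writes 0xff.

/* Illustration only: the level-to-register mapping implied by the write above. */
static u8 example_ws_brightness_to_reg(unsigned int brightness) /* 0..255 */
{
	if (brightness > 255)
		brightness = 255;

	return 0xff - brightness;	/* 255 -> 0x00 (brightest), 0 -> 0xff (off) */
}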
+
+static const struct backlight_ops ws_bridge_bl_ops = {
+ .update_status = ws_bridge_bl_update_status,
+};
+
+static struct backlight_device *ws_bridge_create_backlight(struct ws_bridge *ws)
+{
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+ struct device *dev = ws->dev;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ws,
+ &ws_bridge_bl_ops, &props);
+}
+
+static int ws_bridge_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct drm_panel *panel;
+ struct ws_bridge *ws;
+ int ret;
+
+ ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
+ ws->dev = dev;
+
+ ws->reg_map = devm_regmap_init_i2c(i2c, &ws_regmap_config);
+ if (IS_ERR(ws->reg_map))
+ return dev_err_probe(dev, PTR_ERR(ws->reg_map), "Failed to allocate regmap\n");
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, &panel, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find remote panel\n");
+
+ ws->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ws->next_bridge))
+ return PTR_ERR(ws->next_bridge);
+
+ ws->backlight = ws_bridge_create_backlight(ws);
+ if (IS_ERR(ws->backlight)) {
+ ret = PTR_ERR(ws->backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ regmap_write(ws->reg_map, 0xc0, 0x01);
+ regmap_write(ws->reg_map, 0xc2, 0x01);
+ regmap_write(ws->reg_map, 0xac, 0x01);
+
+ ws->bridge.type = DRM_MODE_CONNECTOR_DPI;
+ ws->bridge.of_node = dev->of_node;
+ devm_drm_bridge_add(dev, &ws->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ws_bridge_of_ids[] = {
+ {.compatible = "waveshare,dsi2dpi",},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ws_bridge_of_ids);
+
+static struct i2c_driver ws_bridge_driver = {
+ .driver = {
+ .name = "ws_dsi2dpi",
+ .of_match_table = ws_bridge_of_ids,
+ },
+ .probe = ws_bridge_probe,
+};
+module_i2c_driver(ws_bridge_driver);
+
+MODULE_AUTHOR("Joseph Guo <qijian.guo@nxp.com>");
+MODULE_DESCRIPTION("Waveshare DSI2DPI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 139b81db6312..ac5e7ed195cf 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -113,33 +113,22 @@ mkdir -p install/modules/
INSTALL_MOD_PATH=install/modules/ make modules_install
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
- make Image.lzma
- mkimage \
- -f auto \
- -A arm \
- -O linux \
- -d arch/arm64/boot/Image.lzma \
- -C lzma\
- -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
- KERNEL_IMAGE_NAME+=" cheza-kernel"
-
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
# Pass needed files to the test stage
mkdir -p install
cp -rfv .gitlab-ci/* install/.
-cp -rfv ci/* install/.
+cp -rfv bin/ci/* install/.
cp -rfv install/common install/ci-common
cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,7 +137,7 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
done
@@ -165,7 +154,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 9c198239033d..af27ff5de369 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -1,8 +1,7 @@
.build:
extends:
- - .build-rules
- .container+build-rules
- stage: build
+ stage: build-only
artifacts:
paths:
- artifacts
@@ -14,7 +13,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
KERNEL_IMAGE_NAME: "zImage"
@@ -25,7 +24,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm64/configs/defconfig"
KERNEL_IMAGE_NAME: "Image"
@@ -45,16 +44,22 @@
igt:arm32:
extends: .build:arm32
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:arm64:
extends: .build:arm64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:x86_64:
extends: .build:x86_64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
@@ -68,7 +73,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -80,7 +85,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -92,7 +97,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -110,3 +115,112 @@ build-nodebugfs:arm64:
build:x86_64:
extends: .build:x86_64
+
+# Disable build jobs that we won't use
+alpine-build-testing:
+ rules:
+ - when: never
+
+debian-android:
+ rules:
+ - when: never
+
+debian-arm32:
+ rules:
+ - when: never
+
+debian-arm32-asan:
+ rules:
+ - when: never
+
+debian-arm64:
+ rules:
+ - when: never
+
+debian-arm64-asan:
+ rules:
+ - when: never
+
+debian-arm64-build-test:
+ rules:
+ - when: never
+
+debian-arm64-release:
+ rules:
+ - when: never
+
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
+debian-build-testing:
+ rules:
+ - when: never
+
+debian-clang:
+ rules:
+ - when: never
+
+debian-clang-release:
+ rules:
+ - when: never
+
+debian-no-libdrm:
+ rules:
+ - when: never
+
+debian-ppc64el:
+ rules:
+ - when: never
+
+debian-release:
+ rules:
+ - when: never
+
+debian-s390x:
+ rules:
+ - when: never
+
+debian-testing:
+ rules:
+ - when: never
+
+debian-testing-asan:
+ rules:
+ - when: never
+
+debian-testing-msan:
+ rules:
+ - when: never
+
+debian-testing-ubsan:
+ rules:
+ - when: never
+
+debian-vulkan:
+ rules:
+ - when: never
+
+debian-x86_32:
+ rules:
+ - when: never
+
+fedora-release:
+ rules:
+ - when: never
+
+rustfmt:
+ rules:
+ - when: never
+
+shader-db:
+ rules:
+ - when: never
+
+windows-msvc:
+ rules:
+ - when: never
+
+yaml-toml-shell-py-test:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/check-devicetrees.yml b/drivers/gpu/drm/ci/check-devicetrees.yml
new file mode 100644
index 000000000000..727bd56018b8
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-devicetrees.yml
@@ -0,0 +1,50 @@
+.dt-check-base:
+ stage: static-checks
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ SCHEMA: "display:gpu"
+ VENV_PATH: "/tmp/dtcheck-venv"
+ before_script:
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION} python3-dev python3-venv python3-pip yamllint
+ - python3 -m venv "${VENV_PATH}"
+ - source "${VENV_PATH}/bin/activate"
+ - pip3 install dtschema
+ script:
+ - drivers/gpu/drm/ci/${SCRIPT_NAME}
+ artifacts:
+ when: on_failure
+ paths:
+ - ${ARTIFACT_FILE}
+ allow_failure:
+ exit_codes:
+ - 102
+
+dtbs-check:arm32:
+ extends:
+ - .build:arm32
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dtbs-check:arm64:
+ extends:
+ - .build:arm64
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dt-binding-check:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dt-binding-check.sh"
+ ARTIFACT_FILE: "dt-binding-check.log"
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 2a94f54ce4cf..5f90508578a3 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -20,50 +20,58 @@ debian/arm64_build:
EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
# Disable container jobs that we won't use
-alpine/x86_64_build:
+debian/arm64_test-vk:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/baremetal_arm32_test-gl:
rules:
- when: never
-debian/arm64_test-vk:
+debian/baremetal_arm64_test-vk:
rules:
- when: never
-debian/arm64_test-gl:
+debian/ppc64el_build:
rules:
- when: never
-fedora/x86_64_build:
+debian/s390x_build:
rules:
- when: never
-debian/android_build:
+debian/x86_32_build:
rules:
- when: never
-.debian/x86_64_test-android:
+debian/x86_64_test-android:
rules:
- when: never
-windows_build_msvc:
+debian/x86_64_test-video:
rules:
- when: never
-windows_test_msvc:
+debian/x86_64_test-vk:
rules:
- when: never
-.debian/x86_64_build-mingw:
- rules:
+fedora/x86_64_build:
+ rules:
- when: never
-rustfmt:
- rules:
+debian/android_build:
+ rules:
+ - when: never
+
+windows_build_msvc:
+ rules:
+ - when: never
+
+windows_test_msvc:
+ rules:
- when: never
windows_msvc:
rules:
- - when: never \ No newline at end of file
+ - when: never
diff --git a/drivers/gpu/drm/ci/dt-binding-check.sh b/drivers/gpu/drm/ci/dt-binding-check.sh
new file mode 100755
index 000000000000..99e1c0df84b7
--- /dev/null
+++ b/drivers/gpu/drm/ci/dt-binding-check.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+VENV_PATH="${VENV_PATH:-/tmp/dtschema-venv}"
+source "${VENV_PATH}/bin/activate"
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" dt_binding_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dt-binding-check.log; then
+ echo "ERROR: 'make dt_binding_check' failed. Please check dt-binding-check.log for details."
+ exit 1
+fi
+
+if [[ -s dt-binding-check.log ]]; then
+ echo "WARNING: dt_binding_check reported warnings. Please check dt-binding-check.log" \
+ "for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/dtbs-check.sh b/drivers/gpu/drm/ci/dtbs-check.sh
new file mode 100755
index 000000000000..57842c452439
--- /dev/null
+++ b/drivers/gpu/drm/ci/dtbs-check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+make LLVM=1 ARCH="${KERNEL_ARCH}" defconfig
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" ARCH="${KERNEL_ARCH}" LLVM=1 dtbs_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dtbs-check.log; then
+ echo "ERROR: 'make dtbs_check' failed. Please check dtbs-check.log for details."
+ exit 1
+fi
+
+if [[ -s dtbs-check.log ]]; then
+ echo "WARNING: dtbs_check reported warnings. Please check dtbs-check.log for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 90bde9f00cc3..56638814bb28 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,17 +1,17 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 02337aec715c25dae7ff2479d986f831c77fe536
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b
+ IGT_VERSION: 129d5b10baaadea1d6cd6377341c4cb42e7ee6fd
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
- MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb
DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
CI_PRE_CLONE_SCRIPT: |-
set -o xtrace
@@ -20,6 +20,9 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_FILE_SCRIPT: |-
+ echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
S3_KERNEL_BUCKET: mesa-rootfs
@@ -31,17 +34,18 @@ variables:
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
- # default kernel for rootfs before injecting the current kernel tree
- KERNEL_REPO: "gfx-ci/linux"
- KERNEL_TAG: "v6.6.21-mesa-f8ea"
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
- PKG_REPO_REV: "3cc12a2a"
LAVA_TAGS: subset-1-gfx
- LAVA_JOB_PRIORITY: 30
+ # Default priority for non-merge pipelines
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: "" # Empty tags are ignored by gitlab
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
+ FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
+ JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
+
default:
id_tokens:
S3_JWT:
@@ -50,16 +54,13 @@ default:
- export SCRIPTS_DIR=$(mktemp -d)
- curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh"
- source ${SCRIPTS_DIR}/setup-test-env.sh
- - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
- - echo -n "${S3_JWT}" > "${S3_JWT_FILE}"
- - unset CI_JOB_JWT S3_JWT
- - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
+ - eval "$S3_JWT_FILE_SCRIPT"
- echo -e "\e[0Ksection_start:$(date +%s):drm_ci_download_section[collapsed=true]\r\e[0KDownloading mesa from $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz"
- cd $CI_PROJECT_DIR
- curl --output - $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz | tar -xz
- mv mesa-$DRM_CI_COMMIT_SHA/.gitlab-ci* .
- - mv mesa-$DRM_CI_COMMIT_SHA/bin/ci .
+ - mv mesa-$DRM_CI_COMMIT_SHA/bin .
- rm -rf mesa-$DRM_CI_COMMIT_SHA/
- echo -e "\e[0Ksection_end:$(date +%s):drm_ci_download_section\r\e[0K"
@@ -71,20 +72,19 @@ default:
export S3_JWT="$(<${S3_JWT_FILE})" &&
rm "${S3_JWT_FILE}"
+
include:
- project: 'freedesktop/ci-templates'
- ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
- file:
- - '/templates/ci-fairy.yml'
- - project: 'freedesktop/ci-templates'
ref: *ci-templates-commit
file:
- '/templates/alpine.yml'
- '/templates/debian.yml'
- '/templates/fedora.yml'
+ - '/templates/ci-fairy.yml'
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
+ - '/.gitlab-ci/build/gitlab-ci.yml'
- '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/farm-rules.yml'
- '/.gitlab-ci/lava/lava-gitlab-ci.yml'
@@ -104,20 +104,27 @@ include:
- '/src/microsoft/ci/gitlab-ci-inc.yml'
- '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
+ - 'docs/gitlab-ci.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
- drivers/gpu/drm/ci/static-checks.yml
- drivers/gpu/drm/ci/build.yml
- drivers/gpu/drm/ci/test.yml
+ - drivers/gpu/drm/ci/check-devicetrees.yml
+ - drivers/gpu/drm/ci/kunit.yml
- 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
stages:
- sanity
- container
- - code-validation
+ - deploy
- git-archive
- - build
+ - build-for-tests
+ - build-only
+ - static-checks
+ - kunit
+ - code-validation
- amdgpu
- i915
- mediatek
@@ -128,33 +135,27 @@ stages:
- rockchip
- software-driver
+
# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
rules:
- # Pipeline for forked project branch
- - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
- when: manual
- # Forked project branch / pre-merge pipeline not for Marge bot
- - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
- when: manual
- # Pipeline runs for the main branch of the upstream Mesa project
- - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
- when: always
- # Post-merge pipeline
- - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
- when: on_success
- # Post-merge pipeline, not for Marge Bot
- - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
- when: on_success
+ # do not duplicate pipelines on merge pipelines
+ - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
+ when: never
+ # merge pipeline
+ - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
+ # post-merge pipeline
+ - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
# Pre-merge pipeline
- - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
- # Pre-merge pipeline for Marge Bot
- - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
+ - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"'
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
+ # nightly pipeline
+ - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
+ # pipeline for direct pushes that bypassed the CI
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+
# Rules applied to every job in the pipeline
.common-rules:
@@ -162,42 +163,70 @@ stages:
- if: *is-fork-push
when: manual
+
.never-post-merge-rules:
rules:
- if: *is-post-merge
when: never
-# Rule to filter for only scheduled pipelines.
-.scheduled_pipeline-rules:
- rules:
- - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
- when: on_success
-# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
-# something like a nightly run should include this rule.
-.no_scheduled_pipelines-rules:
+.container+build-rules:
rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
+ - if: *is-merge-attempt
+ when: on_success
+ # Same as above, but for pre-merge pipelines
+ - if: *is-pre-merge
+ when: manual
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: never
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
-# When to automatically run the CI for build jobs
-.build-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run automatically once all dependency jobs have passed
- - when: on_success
-# When to automatically run the CI for container jobs
-.container+build-rules:
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
+ - if: *is-merge-attempt
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
+ - if: *is-pre-merge
+ when: manual
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: delayed
+ start_in: *build-delay
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
- when: manual
+
.ci-deqp-artifacts:
artifacts:
- name: "mesa_${CI_JOB_NAME}"
+ name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
when: always
untracked: false
paths:
@@ -208,40 +237,20 @@ stages:
- _build/meson-logs/strace
-.container-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run pipeline by default in the main project if any CI pipeline
- # configuration files were changed, to ensure docker images are up to date
- - if: *is-post-merge
- changes:
- - drivers/gpu/drm/ci/**/*
- when: on_success
- # Run pipeline by default if it was triggered by Marge Bot, is for a
- # merge request, and any files affecting the pipeline were changed
- - if: *is-pre-merge-for-marge
- when: on_success
- # Run pipeline by default in the main project if it was not triggered by
- # Marge Bot, and any files affecting the pipeline were changed
- - if: *is-post-merge-not-for-marge
- when: on_success
- # Allow triggering jobs manually in other cases
- - when: manual
-
+python-artifacts:
+ variables:
+ GIT_DEPTH: 10
# Git archive
-
-make git archive:
+make-git-archive:
extends:
- .fdo.ci-fairy
stage: git-archive
rules:
- !reference [.scheduled_pipeline-rules, rules]
- # ensure we are running on packet
tags:
- - packet.net
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
script:
# Remove drm-ci files we just added
- rm -rf .gitlab-ci.*
@@ -261,33 +270,80 @@ sanity:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- if: *is-pre-merge
when: on_success
- # Other cases default to never
+ - when: never
variables:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ - |
+ set -eu
+ image_tags=(
+ ALPINE_X86_64_LAVA_SSH_TAG
+ CONTAINER_TAG
+ DEBIAN_BASE_TAG
+ DEBIAN_BUILD_TAG
+ DEBIAN_PYUTILS_TAG
+ DEBIAN_TEST_GL_TAG
+ KERNEL_TAG
+ PKG_REPO_REV
+ )
+ for var in "${image_tags[@]}"
+ do
+ if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
+ then
+ echo "$var is too long; please make sure it is at most 20 chars."
+ exit 1
+ fi
+ done
artifacts:
when: on_failure
reports:
junit: check-*.xml
-# Rules for tests that should not block merging, but should be available to
-# optionally run with the "play" button in the UI in pre-merge non-marge
-# pipelines. This should appear in "extends:" after any includes of
-# test-source-dep.yml rules, so that these rules replace those.
-.test-manual-mr:
+
+mr-label-maker-test:
+ extends:
+ - .fdo.ci-fairy
+ stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - if: *is-forked-branch-or-pre-merge-not-for-marge
- when: manual
+ - !reference [.mr-label-maker-rules, rules]
variables:
- JOB_TIMEOUT: 80
+ GIT_STRATEGY: fetch
+ timeout: 10m
+ script:
+ - set -eu
+ - python3 -m venv .venv
+ - source .venv/bin/activate
+ - pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
+ - mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
# Jobs that need to pass before spending hardware resources on further testing
.required-for-hardware-jobs:
- needs: []
+ needs:
+ - job: clang-format
+ optional: true
+ - job: rustfmt
+ optional: true
+ - job: toml-lint
+ optional: true
+
+deploy-docs:
+ rules:
+ - when: never
+
+linkcheck-docs:
+ rules:
+ - when: never
+
+test-docs:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index f38836ec837c..b24d4bc53cda 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -19,6 +19,7 @@ set +e
cat /sys/kernel/debug/dri/*/state
set -e
+mkdir -p /lib/modules
case "$DRIVER_NAME" in
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
@@ -47,7 +48,7 @@ else
ARCH="x86_64"
fi
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
+curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s $PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
TESTLIST="/igt/libexec/igt-gpu-tools/ci-testlist.txt"
@@ -69,7 +70,7 @@ igt-runner \
run \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
- --output /results \
+ --output $RESULTS_DIR \
-vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
@@ -80,13 +81,21 @@ set -e
deqp-runner junit \
--testsuite IGT \
- --results /results/failures.csv \
- --output /results/junit.xml \
+ --results $RESULTS_DIR/failures.csv \
+ --output $RESULTS_DIR/junit.xml \
--limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Store the results also in the simpler format used by the runner in ChromeOS CI
-#sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt
+ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+ # If debug_locks is 0, it indicates lockdep is detected and it turns itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 8d8b9e71852e..7acc2e2a8eaa 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,15 +1,18 @@
variables:
- CONTAINER_TAG: "2024-09-09-uprevs"
- DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ CONTAINER_TAG: "20250502-mesa-uprev"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
- DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
+ # default kernel for rootfs before injecting the current kernel tree
+ KERNEL_TAG: "v6.14-mesa-0bdd"
+ KERNEL_REPO: "gfx-ci/linux"
+ PKG_REPO_REV: "95bf62c"
+
+ DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+
+ ALPINE_X86_64_BUILD_TAG: "${CONTAINER_TAG}"
+ ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
- ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" \ No newline at end of file
+ CONDITIONAL_BUILD_ANGLE_TAG: 384145a4023315dae658259bee07c43a
+ CONDITIONAL_BUILD_PIGLIT_TAG: a19e424b8a3f020dbf1b9dd29f220a4f
diff --git a/drivers/gpu/drm/ci/kunit.sh b/drivers/gpu/drm/ci/kunit.sh
new file mode 100755
index 000000000000..7a1052fd3f17
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+export PATH="/usr/bin:$PATH"
+
+./tools/testing/kunit/kunit.py run \
+ --arch "${KERNEL_ARCH}" \
+ --make_options LLVM=1 \
+ --kunitconfig=drivers/gpu/drm/tests
diff --git a/drivers/gpu/drm/ci/kunit.yml b/drivers/gpu/drm/ci/kunit.yml
new file mode 100644
index 000000000000..0d5b2c4433d2
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.yml
@@ -0,0 +1,37 @@
+.kunit-packages: &kunit-packages
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION}
+
+.kunit-base:
+ stage: kunit
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ script:
+ - drivers/gpu/drm/ci/kunit.sh
+
+kunit:arm32:
+ extends:
+ - .build:arm32
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-arm
+
+kunit:arm64:
+ extends:
+ - .build:arm64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-aarch64
+
+kunit:x86_64:
+ extends:
+ - .build:x86_64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-x86
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6add15083c78..a295102c3468 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -1,58 +1,105 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
+# shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
-set -e
-set -x
+# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs, if they exist.
+_check_artifact_path() {
+ _url="https://${1}/${2}"
+ if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
+ echo -n "${_url}"
+ fi
+}
+
+get_path_to_artifact() {
+ _mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
+ if [ -n "${_mainline_artifact}" ]; then
+ echo -n "${_mainline_artifact}"
+ return
+ fi
+ _fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
+ if [ -n "${_fork_artifact}" ]; then
+ echo -n "${_fork_artifact}"
+ return
+ fi
+ set +x
+ error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
+ echo "" >&2
+ echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
+ echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
+ echo "" >&2
+ echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
+ echo "" >&2
+ echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
+ set -x
+ exit 1
+}
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
+
+section_start prepare_rootfs "Preparing root filesystem"
+
+set -ex
-# Try to use the kernel and rootfs built in mainline first, so we're more
-# likely to hit cache
-if curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
-else
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
-fi
+ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
+[ $? != 1 ] || exit 1
rm -rf results
mkdir -p results/job-rootfs-overlay/
-cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
-# Prepare env vars for upload.
-section_start variables "Variables passed through:"
-KERNEL_IMAGE_BASE="https://${BASE_SYSTEM_HOST_PATH}" \
- artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
-section_end variables
-
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+# Prepare env vars for upload.
+section_switch variables "Environment variables passed through to device:"
+cat results/job-rootfs-overlay/set-job-env-vars.sh
+
+section_switch lava_submit "Submitting job for scheduling"
+
touch results/lava.log
tail -f results/lava.log &
-
+# Ensure that we are printing the commands that are being executed,
+# making it easier to debug the job in case it fails.
+set -x
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
- submit \
+ --farm "${FARM}" \
+ --device-type "${DEVICE_TYPE}" \
+ --boot-method "${BOOT_METHOD}" \
+ --job-timeout-min $((CI_JOB_TIMEOUT/60 - 5)) \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
- --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --rootfs-url "${ROOTFS_URL}" \
--kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}" \
- --build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
- --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
- --job-timeout-min ${JOB_TIMEOUT:-80} \
+ --kernel-external "${EXTERNAL_KERNEL_TAG}" \
--first-stage-init artifacts/ci-common/init-stage1.sh \
- --ci-project-dir "${CI_PROJECT_DIR}" \
- --device-type "${DEVICE_TYPE}" \
- --farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
- --boot-method "${BOOT_METHOD}" \
--visibility-group "${VISIBILITY_GROUP}" \
--lava-tags "${LAVA_TAGS}" \
--mesa-job-name "$CI_JOB_NAME" \
--structured-log-file "results/lava_job_detail.json" \
--ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
+ --project-name "${CI_PROJECT_NAME}" \
+ --starting-section "${CURRENT_SECTION}" \
+ --job-submitted-at "${CI_JOB_STARTED_AT}" \
+ - append-overlay \
+ --name=kernel-build \
+ --url="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
+ --compression=zstd \
+ --path="${CI_PROJECT_DIR}" \
+ --format=tar \
+ - append-overlay \
+ --name=job-overlay \
+ --url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
+ --compression=gz \
+ --path="/" \
+ --format=tar \
+ - submit \
>> results/lava.log
diff --git a/drivers/gpu/drm/ci/setup-llvm-links.sh b/drivers/gpu/drm/ci/setup-llvm-links.sh
new file mode 100755
index 000000000000..ace33af82a3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/setup-llvm-links.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MIT
+set -euo pipefail
+
+ln -svf "$(which clang++-${LLVM_VERSION})" /usr/bin/clang++
+ln -svf "$(which clang-${LLVM_VERSION})" /usr/bin/clang
+ln -svf "$(which ld.lld-${LLVM_VERSION})" /usr/bin/ld.lld
+ln -svf "$(which lld-${LLVM_VERSION})" /usr/bin/lld
+ln -svf "$(which llvm-ar-${LLVM_VERSION})" /usr/bin/llvm-ar
+ln -svf "$(which llvm-nm-${LLVM_VERSION})" /usr/bin/llvm-nm
+ln -svf "$(which llvm-objcopy-${LLVM_VERSION})" /usr/bin/llvm-objcopy
+ln -svf "$(which llvm-readelf-${LLVM_VERSION})" /usr/bin/llvm-readelf
+ln -svf "$(which llvm-strip-${LLVM_VERSION})" /usr/bin/llvm-strip
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index f0ef60c8f56d..81147e86bfd0 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,27 +1,30 @@
-.test-rules:
- rules:
- - if: '$FD_FARM == "offline" && $RUNNER_TAG =~ /^google-freedreno-/'
- when: never
- - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
- when: never
- - !reference [.no_scheduled_pipelines-rules, rules]
- - when: on_success
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
.lava-test:
extends:
- - .test-rules
+ - .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.collabora-farm-rules, rules]
+ - when: on_success
script:
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- rm -rf install
- tar -xf artifacts/install.tar
- - mv install/* artifacts/.
+ - mv -n install/* artifacts/.
# Override it with our lava-submit.sh script
- ./artifacts/lava-submit.sh
.lava-igt:arm32:
extends:
- - .lava-test:arm32
+ - .lava-arm32-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "armhf"
@@ -30,14 +33,14 @@
- testing:arm32
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm32
- - debian/x86_64_build
+ - debian/arm32_test-gl
+ - python-artifacts
- testing:arm32
- igt:arm32
.lava-igt:arm64:
extends:
- - .lava-test:arm64
+ - .lava-arm64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "arm64"
@@ -46,14 +49,14 @@
- testing:arm64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm64
- - debian/x86_64_build
+ - debian/arm64_test-gl
+ - python-artifacts
- testing:arm64
- igt:arm64
.lava-igt:x86_64:
extends:
- - .lava-test:x86_64
+ - .lava-x86_64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "amd64"
@@ -62,17 +65,21 @@
- testing:x86_64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_x86_64
- - debian/x86_64_build
+ - debian/x86_64_test-gl
+ - python-artifacts
- testing:x86_64
- igt:x86_64
.baremetal-igt-arm64:
extends:
- - .baremetal-test-arm64
- - .use-debian/baremetal_arm64_test
- - .test-rules
+ - .baremetal-test-arm64-gl
+ - .use-debian/baremetal_arm64_test-gl
+ - .allow_failure_lockdep
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.google-freedreno-farm-rules, rules]
+ - when: on_success
variables:
FDO_CI_CONCURRENT: 10
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
@@ -81,13 +88,37 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/baremetal_arm64_test
+ - debian/baremetal_arm64_test-gl
- job: testing:arm64
artifacts: false
- igt:arm64
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .test-gl
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM
+ before_script:
+ - !reference [default, before_script]
+ - rm -rf install
+ - tar -xf artifacts/install.tar
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -132,7 +163,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -146,21 +177,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
-
-msm:sdm845:
- extends:
- - .baremetal-igt-arm64
- stage: msm
- parallel: 6
- variables:
- DEVICE_TYPE: sdm845-cheza-r3
- DRIVER_NAME: msm
- BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
- GPU_VERSION: sdm845
- RUNNER_TAG: google-freedreno-cheza
- script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -291,7 +308,7 @@ i915:cml:
variables:
DEVICE_TYPE: asus-C436FA-Flip-hatch
GPU_VERSION: cml
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-Flip-hatch
i915:tgl:
extends:
@@ -439,47 +456,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- - .test-rules
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- - .test-rules
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index e70bd9d447ca..adffb011298a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -3,13 +3,6 @@
# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-kms_async_flips@async-flip-with-page-flip-events
-
-# Board Name: hp-11A-G6-EE-grunt
-# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
kms_async_flips@crc
# Board Name: hp-11A-G6-EE-grunt
@@ -25,3 +18,17 @@ kms_plane@pixel-format-source-clamping
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3835
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@async-flip-with-page-flip-events-atomic
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3836
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@crc-atomic
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index f41b3e112976..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -32,3 +33,8 @@ amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_bo
amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_fence
amdgpu/amd_vrr_range@freesync-parsing
device_reset.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 0907cb0f6d9e..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,5 +1,3 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
@@ -7,18 +5,14 @@ i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -33,20 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -54,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0207c9807bee..fee246c25b9a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -53,3 +53,17 @@ kms_pm_rpm@modeset-lpsp-stress
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_pm_rpm@drm-resources-equal
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13346
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+i915_hangman@gt-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13345
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5186ba3dbbc6..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -20,6 +21,7 @@ i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
i915_pm_rpm.*
+i915_module_load@reload-with-fault-injection
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index 64772fedaed5..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -8,6 +8,7 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
@@ -31,11 +32,7 @@ kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
kms_sysfs_edid_timing,Fail
perf@i915-ref-count,Fail
perf@non-zero-reason,Timeout
@@ -45,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index f352b719cf7d..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,5 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -9,8 +8,8 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
@@ -36,14 +35,17 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
@@ -51,10 +53,17 @@ kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
+kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-continuous-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-plane-move-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
+kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
-kms_vblank@query-forked-hang,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -63,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index d8401251e5f4..5343cc1c8696 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -25,3 +25,10 @@ kms_plane_alpha_blend@constant-alpha-min
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@crc
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13347
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 9d753d97c9ab..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,10 +17,10 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
+kms_flip.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 6eb64c672f7d..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,26 +1,19 @@
-core_setmaster@master-drop-set-user,Fail
core_setmaster_vs_auth,Fail
+gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -35,32 +28,20 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_rotation_crc@multiplane-rotation,Fail
-kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
-core_setmaster@master-drop-set-shared-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 077886b76093..2a909c657343 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -11,3 +11,10 @@ core_hotunplug@unplug-rescan
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13348
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@dpms-off-confusion-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 9c64146aed90..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -27,3 +28,301 @@ perf.*
# Kernel panic
drm_fdinfo.*
kms_plane_alpha_blend.*
+
+kms_async_flips.*
+# [IGT] kms_async_flips: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 0 PID: 1253 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 0 UID: 0 PID: 1253 Comm: kms_async_flips Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 84 4c 89 f2 48 c7 c7 67 56 69 84 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffff9573c0dcfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff93498148e238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffff9573c0dcf988 RDI: 0000000000000001
+# RBP: ffff934980d80000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff8488c8a0 R12: ffff934980d80d68
+# R13: 0000000000000000 R14: ffff93498093c9b0 R15: 0000000000000000
+# FS: 00007fbf1f4486c0(0000) GS:ffff9349fbc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f260419028 CR3: 0000000106594000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffe061f6c60 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6d00 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffe061f6d00 R08: 0000000000000007 R09: 000055f260410cb0
+# R10: 650094cf40322201 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffe061f6d00
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_async_flips: starting subtest test-time-stamp
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_async_flips:1253 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_async_flips state:D stack:12720 pid:1253 tgid:1253 ppid:164 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# RSP: 002b:00007ffe061f6c50 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000055f24e272330 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6ce0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffe061f6ce0 R08: 0000000000000000 R09: 000055f2604184f0
+# R10: 00007fbf209ca310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055f26040ab90
+# </TASK>
+# INFO: lockdep is turned off.
+
+kms_lease.*
+# [IGT] kms_lease: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 1 PID: 1271 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 1271 Comm: kms_lease Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 a1 4c 89 f2 48 c7 c7 67 56 69 a1 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffffb26500ddfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff988580dc6238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffb26500ddf988 RDI: 0000000000000001
+# RBP: ffff988580d90000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffffa188c8a0 R12: ffff988580d90d68
+# R13: 0000000000000000 R14: ffff98858093c9b0 R15: 0000000000000000
+# FS: 00007f6b0c60d6c0(0000) GS:ffff9885fbd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055eea27460a8 CR3: 0000000105ac6000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffc42b13670 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b13710 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffc42b13710 R08: 0000000000000007 R09: 000055eea2741720
+# R10: 9fb9be4e118bfa43 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffc42b13710
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_lease: starting subtest page-flip-implicit-plane
+# [IGT] kms_lease: starting dynamic subtest pipe-A-eDP-1
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_lease:1271 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_lease state:D stack:12720 pid:1271 tgid:1271 ppid:184 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# RSP: 002b:00007ffc42b13650 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 00007ffc42b14270 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b136e0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffc42b136e0 R08: 0000000000000000 R09: 000055eea2741750
+# R10: 00007f6b0d71d310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055eea27378d0
+# </TASK>
+# INFO: lockdep is turned off.
+
+# The test alternates between failing and timing out on reruns, causing the pipeline to fail
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index ed9f7b576843..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -1,15 +1,17 @@
-core_setmaster@master-drop-set-user,Fail
+drm_fdinfo@busy-check-all,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
-i915_pm_rpm@module-reload,Fail
-kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
@@ -17,6 +19,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
@@ -27,25 +30,17 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
+kms_rotation_crc@bad-pixel-format,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
index 5c3ef4486b9d..8b2aefd1cc52 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
@@ -1,13 +1,6 @@
# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13349
# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_flip@flip-vs-panning-interruptible
-
-# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476
-# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_universal_plane@cursor-fb-leak
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-fences-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index 6ec2f83ffe13..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -37,3 +38,115 @@ tools_test.*
# GPU hang
sysfs_timeslice_.*
sysfs_heartbeat_.*
+
+device_reset@unbind-reset-rebind
+# [IGT] device_reset: starting subtest unbind-reset-rebind
+# i915 0000:00:02.0: [drm] Found kabylake (device ID 5917) display version 9.00 stepping C0
+# i915 0000:00:02.0: vgaarb: deactivate vga console
+# i915 0000:00:02.0: [drm] *ERROR* DPLL0 not locked
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 4 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 4 UID: 0 PID: 472 Comm: device_reset Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fbd0 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: ffff953182571058 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa80 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: 0000000000000000 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eed00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f1df530eff8 CR3: 0000000102302002 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# intel_update_cdclk+0x1c/0x90
+# intel_cdclk_init_hw+0x46e/0x4c0
+# intel_power_domains_init_hw+0x3eb/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 28591
+# hardirqs last enabled at (28625): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (28658): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (28688): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (28701): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 1 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 472 Comm: device_reset Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fb98 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffffacba40f5fbbc RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa48 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: ffff953182573208 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eec40000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f2b6598ee00 CR3: 0000000102302001 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# gen9_disable_dc_states+0x86/0x300
+# intel_power_well_enable+0x56/0x70
+# __intel_display_power_get_domain.part.0+0x4d/0x70
+# intel_power_domains_init_hw+0x95/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 29605
+# hardirqs last enabled at (29617): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (29634): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (29630): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (29625): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 461ef69ef08a..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,64 +1,26 @@
-api_intel_allocator@fork-simple-stress-signal,Timeout
-api_intel_allocator@open-vm,Timeout
-api_intel_allocator@simple-allocator,Timeout
-api_intel_bb@lot-of-buffers,Timeout
-api_intel_bb@object-reloc-keep-cache,Timeout
-api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-api_intel_bb@reset-bb,Timeout
-core_auth@basic-auth,Timeout
-core_hotunplug@hotrebind,Timeout
-core_setmaster@master-drop-set-user,Fail
-debugfs_test@read_all_entries_display_on,Timeout
-drm_read@empty-block,Timeout
-dumb_buffer@create-clear,Timeout
-dumb_buffer@invalid-bpp,Timeout
-gen3_render_tiledx_blits,Timeout
-gen7_exec_parse@basic-allocation,Timeout
-gen9_exec_parse@batch-invalid-length,Timeout
-gen9_exec_parse@bb-secure,Timeout
-gen9_exec_parse@secure-batches,Timeout
-gen9_exec_parse@shadow-peek,Timeout
-gen9_exec_parse@unaligned-jump,Timeout
-i915_getparams_basic@basic-subslice-total,Timeout
-i915_hangman@gt-engine-hang,Timeout
+api_intel_allocator@reopen,Timeout
+api_intel_bb@destroy-bb,Timeout
+core_hotunplug@hotrebind-lateclose,Timeout
+dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
-i915_pipe_stress@stress-xrgb8888-ytiled,Timeout
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
-i915_pm_rps@thresholds-idle-park,Timeout
-i915_query@engine-info,Timeout
-i915_query@query-topology-kernel-writes,Timeout
-i915_query@test-query-geometry-subslices,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
-perf_pmu@event-wait,Timeout
-perf_pmu@faulting-read,Timeout
-perf_pmu@gt-awake,Timeout
-perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
-perf_pmu@most-busy-idle-check-all,Timeout
perf_pmu@rc6,Crash
-perf_pmu@render-node-busy-idle,Fail
perf_pmu@semaphore-wait-idle,Timeout
-prime_busy@after,Timeout
-prime_mmap@test_aperture_limit,Timeout
-prime_mmap@test_map_unmap,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
-prime_self_import@basic-with_one_bo,Timeout
-sriov_basic@enable-vfs-autoprobe-off,Timeout
-syncobj_basic@bad-destroy,Timeout
-syncobj_basic@bad-flags-fd-to-handle,Timeout
-syncobj_basic@create-signaled,Timeout
-syncobj_eventfd@invalid-bad-pad,Timeout
-syncobj_eventfd@timeline-wait-before-signal,Timeout
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
+syncobj_basic@illegal-fd-to-handle,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout
@@ -66,9 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-template@A,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index b47df5855e8d..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -35,3 +36,16 @@ syncobj_wait.*
# Kernel panic and test hangs with multiple kms tests
kms_.*
+
+# The test alternates between failing and timing out on reruns, causing the pipeline to fail
+api_intel_allocator@reopen
+api_intel_bb@destroy-bb
+core_hotunplug@hotrebind-lateclose
+dumb_buffer@map-valid
+i915_pm_rps@engine-order
+perf_pmu@busy
+perf_pmu@enable-race
+perf_pmu@semaphore-wait-idle
+prime_mmap@test_refcounting
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all
+syncobj_basic@illegal-fd-to-handle
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 0ce240e3aa07..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,5 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -7,11 +6,8 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -33,19 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
-kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -54,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 60b8d1c64e70..19a289617080 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -4,3 +4,10 @@
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_pm_rpm@modeset-lpsp-stress
+
+# Board Name: dell-latitude-5400-8665U-sarien
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13351
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 8e0efc80d510..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,8 +1,5 @@
-fbdev@eof,Fail
-fbdev@read,Fail
kms_3d,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
@@ -20,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
+kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
+kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2e5bf6ae25f2..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -39,3 +39,17 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/dri-devel/c0b4cded-0971-4abb-9965-2acf4602d7cd@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 8198e06344a3..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,7 +11,13 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 845f852bb4a0..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,16 +1,38 @@
-core_setmaster@master-drop-set-shared-fd,Fail
dumb_buffer@create-clear,Crash
-fbdev@eof,Fail
-fbdev@pan,Fail
-fbdev@read,Fail
-fbdev@unaligned-read,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 8198e06344a3..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,7 +11,13 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 066d24ee3e08..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,5 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index d74e04405e65..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -18,3 +18,24 @@ msm/msm_shrink@copy-gpu-oom-32
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
msm/msm_shrink@copy-gpu-oom-8
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/68
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/69
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index c2833eee1c4b..154b047787b2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -23,3 +24,16 @@ kms_flip@2x-wf_vblank-ts-check
# Hangs the machine
kms_cursor_crc@cursor-random-max-size
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 7c69c1f1d55b..a9bb3e1ad75c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,7 +13,18 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
deleted file mode 100644
index fa8c7e663858..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-drm_read@invalid-buffer,Fail
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@overlay,Fail
-kms_plane_cursor@viewport,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
deleted file mode 100644
index 38ec0305c1f4..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-legacy
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-oom-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@short-flip-before-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@flip-vs-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-msm/msm_shrink@copy-mmap-oom-8
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_lease@page-flip-implicit-plane
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc5
-kms_flip@flip-vs-expired-vblank
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
deleted file mode 100644
index 94783cafc21a..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-# Hangs machine
-kms_bw.*
-
-# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm/msm_mapping@*
-
-# Skip driver specific tests
-^amdgpu.*
-nouveau_.*
-^panfrost.*
-^v3d.*
-^vc4.*
-^vmwgfx*
-
-# Skip intel specific tests
-gem_.*
-i915_.*
-tools_test.*
-
-# Currently fails and causes coverage loss for other tests
-# since core_getversion also fails.
-core_hotunplug.*
-
-# Whole machine hangs
-kms_cursor_crc.*
-
-# IGT test crash
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_content_protection@uevent
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
index 4892c0c70a6d..8d26b23133aa 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
@@ -7,9 +7,4 @@ kms_cursor_legacy@torture-bo,Fail
kms_cursor_legacy@torture-move,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
msm/msm_recovery@gpu-fault-parallel,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index 90282dfa19f4..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,24 +1,9 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-shared-fd,Crash
core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
-kms_bw@linear-tiling-2-displays-1920x1080p,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Crash
-kms_cursor_crc@cursor-onscreen-32x32,Crash
-kms_cursor_crc@cursor-onscreen-64x64,Crash
-kms_cursor_crc@cursor-random-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x32,Crash
-kms_cursor_crc@cursor-sliding-64x21,Crash
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash
-kms_flip@flip-vs-panning-vs-hang,Crash
-kms_invalid_mode@int-max-clock,Crash
-kms_lease@invalid-create-leases,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Crash
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
kms_prop_blob@invalid-set-prop,Crash
-kms_properties@get_properties-sanity-atomic,Crash
-kms_properties@get_properties-sanity-non-atomic,Crash
-kms_rmfb@close-fd,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 83a38853b4af..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -24,6 +24,7 @@ kms_cursor_crc@cursor-rapid-movement-32x32,Fail
kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-size-hints,Fail
kms_cursor_crc@cursor-sliding-32x10,Fail
kms_cursor_crc@cursor-sliding-32x32,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
@@ -32,6 +33,7 @@ kms_cursor_edge_walk@64x64-left-edge,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@flip-vs-cursor-atomic,Fail
@@ -41,20 +43,18 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
kms_flip@basic-flip-vs-wf_vblank,Fail
kms_flip@blocking-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning,Fail
kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
kms_flip@plain-flip-fb-recreate,Fail
kms_flip@plain-flip-fb-recreate-interruptible,Fail
kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
@@ -64,14 +64,11 @@ kms_pipe_crc_basic@nonblocking-crc,Fail
kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_pipe_crc_basic@read-crc,Fail
kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
kms_plane@plane-panning-bottom-right,Fail
kms_plane@plane-panning-top-left,Fail
kms_plane@plane-position-covered,Fail
kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_cursor@primary,Fail
-kms_plane_multiple@tiling-none,Fail
-kms_rmfb@close-fd,Fail
kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 56f7d4f1ed15..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -74,3 +74,73 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
+# Failure Rate: 80
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane_multiple@tiling-none
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-1920x1080p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-absolute-wf_vblank
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-2-displays-1920x1080p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index eb16b29dee48..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,7 +14,13 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index 9c9e048725f8..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,7 +19,13 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 71c02104a683..6ebcc7d89fbd 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -1,12 +1,5 @@
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
kms_cursor_crc@cursor-rapid-movement-256x85,Fail
kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
@@ -20,8 +13,9 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index b3d16e82e9a2..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,8 +1,10 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
-# Kernel panic
-kms_cursor_crc@cursor-rapid-movement-32x10
+# kernel panic seen with kms_cursor_crc tests
+kms_cursor_crc.*
+# kms_cursor_crc@cursor-rapid-movement-32x10
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 0 PID: 2635 Comm: kworker/u8:13 Not tainted 6.9.0-rc7-g40935263a1fd #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -52,7 +54,7 @@ kms_cursor_crc@cursor-rapid-movement-32x10
# FS: 0000000000000000(0000) GS:ffffa2ad2bc00000(0000) knlGS:0000000000000000
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-kms_cursor_crc@cursor-rapid-movement-256x85
+#kms_cursor_crc@cursor-rapid-movement-256x85
# [drm:drm_crtc_add_crc_entry] *ERROR* Overflow of CRC buffer, userspace reads too slow.
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 10 Comm: kworker/u8:0 Not tainted 6.9.0-rc7-g646381cde463 #1
@@ -104,7 +106,7 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
-kms_cursor_crc@cursor-onscreen-256x256
+#kms_cursor_crc@cursor-onscreen-256x256
# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -258,6 +260,535 @@ kms_cursor_edge_walk@128x128-left-edge
# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0
# vkms_vblank_simulate: vblank timer overrun
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-64x64
+# ------------[ cut here ]------------
+# WARNING: CPU: 1 PID: 1250 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 1 UID: 0 PID: 1250 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf ee ec 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 95 ec 48 89 df 5b e9 50 05 95 ec 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffff9fb640aafb08 EFLAGS: 00010202
+# RAX: ffff8e7240859e05 RBX: ffff8e7241cd6400 RCX: ffffffffae496b65
+# RDX: ffffffffad2d1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff8e72590cc000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f0942ad56c0(0000) GS:ffff8e726bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f0942ad0008 CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f0943a84cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007fff267d68d0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005a RCX: 00007f0943a84cdb
+# RDX: 00007fff267d6960 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007fff267d6960 R08: 0000000000000007 R09: 00005619a60f3450
+# R10: fe95a83851609dee R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 00005619a3cd2c68 R15: 00005619a608c830
+# </TASK>
+# irq event stamp: 57793
+# hardirqs last enabled at (57799): [<ffffffffacba3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (57804): [<ffffffffacba3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (45586): [<ffffffffacb17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (45569): [<ffffffffacb17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 UID: 0 PID: 119 Comm: kworker/u8:6 Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x42
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-32x32
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-32x32
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x128
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2898 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2898 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g1f006005ebf8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e e1 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 e1 48 89 df 5b e9 50 05 15 e1 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa35c4082fb08 EFLAGS: 00010202
+# RAX: ffff8b02c1c0f005 RBX: ffff8b02d4509600 RCX: ffffffffa2c96b65
+# RDX: ffffffffa1ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000030f11ddf R11: 00000000f30f11dd R12: ffff8b02d2528000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4b071ab6c0(0000) GS:ffff8b02efc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f7f8384b48 CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4b0815ac5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffd726f75e0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005d RCX: 00007f4b0815ac5b
+# RDX: 00007ffd726f7670 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffd726f7670 R08: 0000000000000007 R09: 000055f7f83e30c0
+# R10: 2b16893c03bd381a R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055f7dbee72c8 R15: 000055f7f837bab0
+# </TASK>
+# irq event stamp: 58921
+# hardirqs last enabled at (58927): [<ffffffffa13a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58932): [<ffffffffa13a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (46140): [<ffffffffa1317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (46135): [<ffffffffa1317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 1657 Comm: kworker/u8:14 Tainted: G W 6.13.0-rc2-g1f006005ebf8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+
# Skip driver specific tests
^amdgpu.*
^msm.*
@@ -272,3 +803,9 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index e17265039ca8..515aceac22b1 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
@@ -10,8 +13,8 @@
static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
MODULE_PARM_DESC(active,
- "Choose which drm client to start, default is"
- CONFIG_DRM_CLIENT_DEFAULT "]");
+ "Choose which drm client to start, default is "
+ CONFIG_DRM_CLIENT_DEFAULT);
/**
* drm_client_setup() - Setup in-kernel DRM clients
@@ -31,6 +34,10 @@ MODULE_PARM_DESC(active,
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n");
+ return;
+ }
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (!strcmp(drm_client_default, "fbdev")) {
diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
index f894ba52bdb5..28951e392482 100644
--- a/drivers/gpu/drm/clients/drm_fbdev_client.c
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -13,22 +13,36 @@
* struct drm_client_funcs
*/
+static void drm_fbdev_client_free(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
+ /*
+ * Fully probed framebuffer device
+ */
drm_fb_helper_unregister_info(fb_helper);
} else {
+ /*
+ * Partially initialized client, no framebuffer device yet
+ */
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
}
-static int drm_fbdev_client_restore(struct drm_client_dev *client)
+static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force)
{
- drm_fb_helper_lastclose(client->dev);
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
@@ -62,32 +76,27 @@ err_drm_err:
return ret;
}
-static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_suspend(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, true);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, true);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, true);
return 0;
}
-static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_resume(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, false);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, false);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, false);
return 0;
}
static const struct drm_client_funcs drm_fbdev_client_funcs = {
.owner = THIS_MODULE,
+ .free = drm_fbdev_client_free,
.unregister = drm_fbdev_client_unregister,
.restore = drm_fbdev_client_restore,
.hotplug = drm_fbdev_client_hotplug,
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index 379850c83e51..4d3005273b27 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -100,7 +100,7 @@ static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
return;
iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
drm_client_buffer_vunmap_local(scanout->buffer);
- drm_client_framebuffer_flush(scanout->buffer, &r);
+ drm_client_buffer_flush(scanout->buffer, &r);
}
static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
@@ -133,7 +133,7 @@ static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
if (scanout->line >= scanout->rows)
scanout->line = 0;
drm_client_buffer_vunmap_local(scanout->buffer);
- drm_client_framebuffer_flush(scanout->buffer, &r);
+ drm_client_buffer_flush(scanout->buffer, &r);
}
static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
@@ -204,7 +204,7 @@ static int drm_log_setup_modeset(struct drm_client_dev *client,
if (format == DRM_FORMAT_INVALID)
return -EINVAL;
- scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
+ scanout->buffer = drm_client_buffer_create_dumb(client, width, height, format);
if (IS_ERR(scanout->buffer)) {
drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
width, height, &format);
@@ -272,7 +272,7 @@ static void drm_log_init_client(struct drm_log *dlog)
err_failed_commit:
for (i = 0; i < n_modeset; i++)
- drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ drm_client_buffer_delete(dlog->scanout[i].buffer);
err_nomodeset:
kfree(dlog->scanout);
@@ -286,26 +286,45 @@ static void drm_log_free_scanout(struct drm_client_dev *client)
if (dlog->n_scanout) {
for (i = 0; i < dlog->n_scanout; i++)
- drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ drm_client_buffer_delete(dlog->scanout[i].buffer);
dlog->n_scanout = 0;
kfree(dlog->scanout);
dlog->scanout = NULL;
}
}
-static void drm_log_client_unregister(struct drm_client_dev *client)
+static void drm_log_client_free(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
struct drm_device *dev = client->dev;
+ kfree(dlog);
+
+ drm_dbg(dev, "Unregistered with drm log\n");
+}
+
+static void drm_log_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
unregister_console(&dlog->con);
mutex_lock(&dlog->lock);
drm_log_free_scanout(client);
- drm_client_release(client);
mutex_unlock(&dlog->lock);
- kfree(dlog);
- drm_dbg(dev, "Unregistered with drm log\n");
+ drm_client_release(client);
+}
+
+static int drm_log_client_restore(struct drm_client_dev *client, bool force)
+{
+ int ret;
+
+ if (force)
+ ret = drm_client_modeset_commit_locked(client);
+ else
+ ret = drm_client_modeset_commit(client);
+
+ return ret;
}
static int drm_log_client_hotplug(struct drm_client_dev *client)
@@ -319,27 +338,29 @@ static int drm_log_client_hotplug(struct drm_client_dev *client)
return 0;
}
-static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_suspend(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
- console_stop(&dlog->con);
+ console_suspend(&dlog->con);
return 0;
}
-static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_resume(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
- console_start(&dlog->con);
+ console_resume(&dlog->con);
return 0;
}
static const struct drm_client_funcs drm_log_client_funcs = {
.owner = THIS_MODULE,
+ .free = drm_log_client_free,
.unregister = drm_log_client_unregister,
+ .restore = drm_log_client_restore,
.hotplug = drm_log_client_hotplug,
.suspend = drm_log_client_suspend,
.resume = drm_log_client_resume,
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 8d22b7627d41..df09cf9a8ca1 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -8,6 +8,7 @@ config DRM_DISPLAY_DP_AUX_BUS
config DRM_DISPLAY_HELPER
tristate
depends on DRM
+ select CEC_CORE if DRM_DISPLAY_DP_AUX_CEC || DRM_DISPLAY_HDMI_CEC_HELPER || CEC_NOTIFIER
help
DRM helpers for display adapters.
@@ -16,6 +17,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_CEC_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -23,7 +25,6 @@ config DRM_BRIDGE_CONNECTOR
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
@@ -82,6 +83,16 @@ config DRM_DISPLAY_HDMI_AUDIO_HELPER
DRM display helpers for HDMI Audio functionality (generic HDMI Codec
implementation).
+config DRM_DISPLAY_HDMI_CEC_HELPER
+ bool
+ help
+ DRM display helpers for HDMI CEC implementation.
+
+config DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER
+ def_bool CEC_NOTIFIER
+ help
+ DRM display helpers for HDMI CEC notifiers implementation.
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index b17879b957d5..0ff4a1ad0222 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -16,6 +16,10 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
drm_hdmi_audio_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_HELPER) += \
+ drm_hdmi_cec_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) += \
+ drm_hdmi_cec_notifier_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 32108307de66..a2d30cf9e06d 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -3,6 +3,7 @@
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -19,7 +20,9 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -98,6 +101,28 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
+ /**
+ * @bridge_hdmi_cec:
+ *
+ * The bridge in the chain that implements CEC support, if any (see
+ * DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER).
+ */
+ struct drm_bridge *bridge_hdmi_cec;
};
#define to_drm_bridge_connector(x) \
@@ -112,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- struct drm_bridge *bridge;
/* Notify all bridges in the pipeline of hotplug events. */
- drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) {
if (bridge->funcs->hpd_notify)
bridge->funcs->hpd_notify(bridge, status);
}
@@ -186,7 +210,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
if (detect) {
- status = detect->funcs->detect(detect);
+ status = detect->funcs->detect(detect, connector);
if (hdmi)
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
@@ -327,7 +351,7 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
drm_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -433,14 +457,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(bridge, connector);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
+
+ return bridge->funcs->dp_audio_startup(bridge, connector);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +486,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +507,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(bridge, connector);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(bridge, connector);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +525,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
- if (bridge->funcs->hdmi_audio_mute_stream)
- return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->hdmi_audio_mute_stream(bridge, connector,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(bridge, connector,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -496,10 +555,83 @@ static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_aud
.mute_stream = drm_bridge_connector_audio_mute_stream,
};
+static int drm_bridge_connector_hdmi_cec_enable(struct drm_connector *connector, bool enable)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_enable(bridge, enable);
+}
+
+static int drm_bridge_connector_hdmi_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_log_addr(bridge, logical_addr);
+}
+
+static int drm_bridge_connector_hdmi_cec_transmit(struct drm_connector *connector,
+ u8 attempts,
+ u32 signal_free_time,
+ struct cec_msg *msg)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_transmit(bridge, attempts,
+ signal_free_time,
+ msg);
+}
+
+static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ if (!bridge->funcs->hdmi_cec_init)
+ return 0;
+
+ return bridge->funcs->hdmi_cec_init(bridge, connector);
+}
+
+static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+ .init = drm_bridge_connector_hdmi_cec_init,
+ .enable = drm_bridge_connector_hdmi_cec_enable,
+ .log_addr = drm_bridge_connector_hdmi_cec_log_addr,
+ .transmit = drm_bridge_connector_hdmi_cec_transmit,
+};
+
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
+static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data)
+{
+ struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data;
+
+ drm_bridge_put(bridge_connector->bridge_edid);
+ drm_bridge_put(bridge_connector->bridge_hpd);
+ drm_bridge_put(bridge_connector->bridge_detect);
+ drm_bridge_put(bridge_connector->bridge_modes);
+ drm_bridge_put(bridge_connector->bridge_hdmi);
+ drm_bridge_put(bridge_connector->bridge_hdmi_audio);
+ drm_bridge_put(bridge_connector->bridge_dp_audio);
+ drm_bridge_put(bridge_connector->bridge_hdmi_cec);
+}
+
/**
* drm_bridge_connector_init - Initialise a connector for a chain of bridges
* @drm: the DRM device
@@ -520,9 +652,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
- struct drm_bridge *bridge, *panel_bridge = NULL;
+ struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
+ bool support_hdcp = false;
int connector_type;
int ret;
@@ -530,6 +663,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!bridge_connector)
return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector);
+ if (ret)
+ return ERR_PTR(ret);
+
bridge_connector->encoder = encoder;
/*
@@ -547,20 +684,28 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
* detection are available, we don't support hotplug detection at all.
*/
connector_type = DRM_MODE_CONNECTOR_Unknown;
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (!bridge->ycbcr_420_allowed)
connector->ycbcr_420_allowed = false;
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- bridge_connector->bridge_edid = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- bridge_connector->bridge_hpd = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- bridge_connector->bridge_detect = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- bridge_connector->bridge_modes = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_EDID) {
+ drm_bridge_put(bridge_connector->bridge_edid);
+ bridge_connector->bridge_edid = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_put(bridge_connector->bridge_hpd);
+ bridge_connector->bridge_hpd = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ drm_bridge_put(bridge_connector->bridge_detect);
+ bridge_connector->bridge_detect = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_MODES) {
+ drm_bridge_put(bridge_connector->bridge_modes);
+ bridge_connector->bridge_modes = drm_bridge_get(bridge);
+ }
if (bridge->ops & DRM_BRIDGE_OP_HDMI) {
if (bridge_connector->bridge_hdmi)
return ERR_PTR(-EBUSY);
@@ -568,7 +713,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->hdmi_clear_infoframe)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_hdmi = bridge;
+ bridge_connector->bridge_hdmi = drm_bridge_get(bridge);
if (bridge->supported_formats)
supported_formats = bridge->supported_formats;
@@ -576,27 +721,87 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
- if (!drm_bridge_get_next_bridge(bridge))
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge);
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = drm_bridge_get(bridge);
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge);
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge);
+
+ if (!bridge->funcs->hdmi_cec_enable ||
+ !bridge->funcs->hdmi_cec_log_addr ||
+ !bridge->funcs->hdmi_cec_transmit)
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (drm_bridge_is_last(bridge))
connector_type = bridge->type;
#ifdef CONFIG_OF
- if (!drm_bridge_get_next_bridge(bridge) &&
- bridge->of_node)
+ if (drm_bridge_is_last(bridge) && bridge->of_node)
connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node));
#endif
if (bridge->ddc)
ddc = bridge->ddc;
- if (drm_bridge_is_panel(bridge))
- panel_bridge = bridge;
+ if (drm_bridge_is_panel(bridge)) {
+ drm_bridge_put(panel_bridge);
+ panel_bridge = drm_bridge_get(bridge);
+ }
+
+ if (bridge->support_hdcp)
+ support_hdcp = true;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
if (bridge_connector->bridge_hdmi) {
- bridge = bridge_connector->bridge_hdmi;
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
@@ -608,22 +813,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -632,6 +821,52 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+ struct drm_bridge *bridge;
+
+ if (bridge_connector->bridge_hdmi_audio)
+ bridge = bridge_connector->bridge_hdmi_audio;
+ else
+ bridge = bridge_connector->bridge_dp_audio;
+
+ dev = bridge->hdmi_audio_dev;
+
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_i2s_formats,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
+ ret = drmm_connector_hdmi_cec_notifier_register(connector,
+ NULL,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
+ ret = drmm_connector_hdmi_cec_register(connector,
+ &drm_bridge_connector_hdmi_cec_funcs,
+ bridge->hdmi_cec_adapter_name,
+ bridge->hdmi_cec_available_las,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
@@ -643,6 +878,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
+ if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) &&
+ IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
+ drm_connector_attach_content_protection_property(connector, true);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index ec7eac6b595f..2d279e82922f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -12,6 +12,7 @@
* to perform transactions on that bus.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -57,7 +58,7 @@ static int dp_aux_ep_probe(struct device *dev)
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..436bfe9f9081 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -5,6 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -41,7 +42,7 @@
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
- * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * Please mail me (hverkuil@kernel.org) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
@@ -96,7 +97,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +113,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +124,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +144,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +194,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +202,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +215,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +230,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +253,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +269,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index c491e3203bf1..4c350c7f5144 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -486,16 +486,16 @@ EXPORT_SYMBOL(drm_lspcon_get_mode);
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
+ * @time_out: LSPCON mode change settle timeout
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
- enum drm_lspcon_mode mode)
+ enum drm_lspcon_mode mode, int time_out)
{
u8 data = 0;
int ret;
- int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index da3c8521a7fa..f9fdf19de74a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -24,10 +24,12 @@
#include <linux/delay.h>
#include <linux/dynamic_debug.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -122,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS;
+}
+EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress);
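A minimal usage sketch (illustrative, not part of this patch), assuming a driver that already holds a struct drm_dp_aux *aux:

	u8 link_status[DP_LINK_STATUS_SIZE];

	/* drm_dp_dpcd_read_link_status() returns 0 on success after this series. */
	if (drm_dp_dpcd_read_link_status(aux, link_status) == 0 &&
	    drm_dp_post_lt_adj_req_in_progress(link_status)) {
		/* The sink is still adjusting its TX parameters after link training. */
	}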
+
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
@@ -327,7 +337,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +368,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -459,6 +469,64 @@ void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
+/**
+ * drm_dp_lttpr_wake_timeout_setup() - Grant extended time for sink to wake up
+ * @aux: The DP AUX channel to use
+ * @transparent_mode: This is true if lttpr is in transparent mode
+ *
+ * This function checks whether the sink requests an extended wake time and,
+ * if so, grants the request. After this setup the source device can keep
+ * retrying an AUX transaction until the granted wake timeout expires.
+ * If this function is not called, all AUX transactions are expected to fail
+ * after the default timeout of 1 ms.
+ */
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode)
+{
+ u8 val = 1;
+ int ret;
+
+ if (transparent_mode) {
+ static const u8 timeout_mapping[] = {
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS] = 1,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS] = 20,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS] = 40,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS] = 60,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS] = 80,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS] = 100,
+ };
+
+ ret = drm_dp_dpcd_readb(aux, DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val < sizeof(timeout_mapping) && timeout_mapping[val]) ?
+ timeout_mapping[val] : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux,
+ DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT,
+ DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED);
+ } else {
+ ret = drm_dp_dpcd_readb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) ?
+ (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) * 10 : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT,
+ DP_EXTENDED_WAKE_TIMEOUT_GRANT);
+ }
+}
+EXPORT_SYMBOL(drm_dp_lttpr_wake_timeout_setup);
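A hypothetical call site (not part of this patch); transparent_lttpr_mode stands in for whatever state the driver keeps about its LTTPR mode:

	/* Grant the extended wake time once, before the first AUX access. */
	drm_dp_lttpr_wake_timeout_setup(aux, transparent_lttpr_mode);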
+
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
@@ -634,6 +702,34 @@ void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
/**
+ * drm_dp_dpcd_set_probe() - Set whether a probe is performed before DPCD accesses
+ * @aux: DisplayPort AUX channel
+ * @enable: true to enable probing before DPCD accesses
+ */
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable)
+{
+ WRITE_ONCE(aux->dpcd_probe_disabled, !enable);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_probe);
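An illustrative use (not part of this patch): a driver that knows its sink never enters the problematic power-save state, e.g. an always-powered eDP panel, could skip the defensive probe:

	drm_dp_dpcd_set_probe(aux, false);	/* no throw-away read before DPCD accesses */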
+
+static bool dpcd_access_needs_probe(struct drm_dp_aux *aux)
+{
+ /*
+ * HP ZR24w corrupts the first DPCD access after entering power save
+ * mode. Eg. on a read, the entire buffer will be filled with the same
+ * byte. Do a throw away read to avoid corrupting anything we care
+ * about. Afterwards things will work correctly until the monitor
+ * gets woken up and subsequently re-enters power save mode.
+ *
+ * The user pressing any button on the monitor is enough to wake it
+ * up, so there is no particularly good place to do the workaround.
+ * We just have to do it before any DPCD access and hope that the
+ * monitor doesn't power down exactly after the throw away read.
+ */
+ return !aux->is_remote && !READ_ONCE(aux->dpcd_probe_disabled);
+}
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -646,26 +742,16 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_read_data() instead.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret;
- /*
- * HP ZR24w corrupts the first DPCD access after entering power save
- * mode. Eg. on a read, the entire buffer will be filled with the same
- * byte. Do a throw away read to avoid corrupting anything we care
- * about. Afterwards things will work correctly until the monitor
- * gets woken up and subsequently re-enters power save mode.
- *
- * The user pressing any button on the monitor is enough to wake it
- * up, so there is no particularly good place to do the workaround.
- * We just have to do it before any DPCD access and hope that the
- * monitor doesn't power down exactly after the throw away read.
- */
- if (!aux->is_remote) {
- ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ if (dpcd_access_needs_probe(aux)) {
+ ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
}
@@ -694,6 +780,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_write_data() instead.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -716,14 +804,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -746,30 +833,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
-
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -780,12 +857,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
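A sketch of how the two helpers pair up in a hypothetical driver (not part of this patch), with dpcd[] being the cached receiver capabilities:

	ret = drm_dp_link_power_up(aux, dpcd[DP_DPCD_REV]);
	if (ret < 0)
		return ret;

	/* ... link training and modeset ... */

	drm_dp_link_power_down(aux, dpcd[DP_DPCD_REV]);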
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
@@ -812,21 +958,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -982,15 +1128,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1003,23 +1149,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1056,12 +1202,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1098,10 +1242,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1151,11 +1295,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1512,7 +1654,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1577,13 +1719,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1721,11 +1863,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2023,14 +2163,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
- drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
- /* Send a bare address packet to start the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.buffer = NULL;
- msg.size = 0;
- err = drm_dp_i2c_do_msg(aux, &msg);
+
+ if (!aux->no_zero_sized) {
+ drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ }
/*
* Reset msg.request in case in case it got
@@ -2049,6 +2192,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
+ if (j + msg.size == msgs[i].len && aux->no_zero_sized)
+ msg.request &= ~DP_AUX_I2C_MOT;
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
@@ -2066,15 +2211,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
if (err >= 0)
err = num;
- /* Send a bare address packet to close out the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.request &= ~DP_AUX_I2C_MOT;
- msg.buffer = NULL;
- msg.size = 0;
- (void)drm_dp_i2c_do_msg(aux, &msg);
+ if (!aux->no_zero_sized) {
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+ }
return err;
}
@@ -2114,13 +2261,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2134,11 +2281,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2337,11 +2480,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2364,11 +2507,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2409,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
+ /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
};
#undef OUI
@@ -2454,11 +2601,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2544,7 +2687,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
- switch (bpp_increment_dpcd) {
+ switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
case DP_DSC_BITS_PER_PIXEL_1_16:
return 16;
case DP_DSC_BITS_PER_PIXEL_1_8:
@@ -2702,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+/*
+ * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and
+ * Appendix L.1 Derivation of Slice Count Requirements.
+ */
+static int dsc_sink_min_slice_throughput(int peak_pixel_rate)
+{
+ if (peak_pixel_rate >= 4800000)
+ return 600000;
+ else if (peak_pixel_rate >= 2700000)
+ return 400000;
+ else
+ return 340000;
+}
+
+/**
+ * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice
+ * @dsc_dpcd: DSC sink's capabilities from DPCD
+ * @peak_pixel_rate: Cumulative peak pixel rate in kHz
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the DSC sink device's maximum pixel throughput per slice, based on
+ * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred
+ * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Note that @peak_pixel_rate is the total pixel rate transferred to the same
+ * DSC/display sink. For instance to calculate a tile's slice count of an MST
+ * multi-tiled display sink (not considering here the required
+ * rounding/alignment of slice count)::
+ *
+ * @peak_pixel_rate = tile_pixel_rate * tile_count
+ * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate)
+ * tile_slice_count = total_slice_count / tile_count
+ *
+ * Returns:
+ * The maximum pixel throughput per slice supported by the DSC sink device
+ * in kPixels/sec.
+ */
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444)
+{
+ int throughput;
+ int delta = 0;
+ int base;
+
+ throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+
+ if (is_rgb_yuv444) {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_SHIFT;
+
+ delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) &
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */
+ delta *= 2000;
+ } else {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_1_SHIFT;
+ }
+
+ switch (throughput) {
+ case 0:
+ return dsc_sink_min_slice_throughput(peak_pixel_rate);
+ case 1:
+ base = 340000;
+ break;
+ case 2 ... 14:
+ base = 400000 + 50000 * (throughput - 2);
+ break;
+ case 15:
+ base = 170000;
+ break;
+ }
+
+ return base + delta;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput);
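A worked sketch of the formula in the kernel-doc above (not part of this patch); tile_pixel_rate and tile_count are hypothetical driver state, and the required slice-count rounding/alignment is omitted:

	int peak_pixel_rate = tile_pixel_rate * tile_count;	/* kHz */
	int max_slice_throughput =
		drm_dp_dsc_sink_max_slice_throughput(dsc_dpcd, peak_pixel_rate,
						     true /* RGB/YUV444 */);
	int total_slice_count = DIV_ROUND_UP(peak_pixel_rate, max_slice_throughput);
	int tile_slice_count = DIV_ROUND_UP(total_slice_count, tile_count);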
+
+static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg)
+{
+ return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+}
+
+/**
+ * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the branch device's maximum overall DSC pixel throughput, based on
+ * the device's DPCD DSC branch capabilities, and whether the output
+ * format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Returns:
+ * - 0: The maximum overall throughput capability is not indicated by
+ * the device separately and it must be determined from the per-slice
+ * max throughput (see @drm_dp_dsc_branch_slice_max_throughput())
+ * and the maximum slice count supported by the device.
+ * - > 0: The maximum overall DSC pixel throughput supported by the branch
+ * device in kPixels/sec.
+ */
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444)
+{
+ int throughput;
+
+ if (is_rgb_yuv444)
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0);
+ else
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_1);
+
+ switch (throughput) {
+ case 0:
+ return 0;
+ case 1:
+ return 680000;
+ default:
+ return 600000 + 50000 * throughput;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput);
+
+/**
+ * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ *
+ * Return the branch device's maximum overall DSC line width, based on
+ * the device's @dsc_branch_dpcd capabilities.
+ *
+ * Returns:
+ * - 0: The maximum line width is not indicated by the device
+ * separately and it must be determined from the maximum
+ * slice count and slice-width supported by the device.
+ * - %-EINVAL: The device indicates an invalid maximum line width
+ * (< 5120 pixels).
+ * - >= 5120: The maximum line width in pixels.
+ */
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE])
+{
+ int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH);
+
+ switch (line_width) {
+ case 0:
+ return 0;
+ case 1 ... 15:
+ return -EINVAL;
+ default:
+ return line_width * 320;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width);
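An illustrative sketch (not part of this patch) of honouring the optional branch limits; max_throughput and max_line_width are hypothetical values the driver derived from the per-slice and slice-count caps, as the kernel-doc describes for the 0 return:

	int branch_throughput =
		drm_dp_dsc_branch_max_overall_throughput(dsc_branch_dpcd, true);
	int branch_line_width =
		drm_dp_dsc_branch_max_line_width(dsc_branch_dpcd);

	if (branch_throughput > 0)
		max_throughput = min(max_throughput, branch_throughput);
	if (branch_line_width > 0)
		max_line_width = min(max_line_width, branch_line_width);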
+
static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address,
u8 *buf, int buf_size)
@@ -2716,13 +3011,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2818,6 +3111,67 @@ int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate);
/**
+ * drm_dp_lttpr_set_transparent_mode() - set the LTTPR in transparent mode
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable or disable transparent mode
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable)
+{
+ u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
+ DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ int ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, val);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret == 1) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_set_transparent_mode);
+
+/**
+ * drm_dp_lttpr_init() - init LTTPR transparency mode according to DP standard
+ * @aux: DisplayPort AUX channel
+ * @lttpr_count: Number of LTTPRs. Between 0 and 8, according to DP standard.
+ * Negative error code for any non-valid number.
+ * See drm_dp_lttpr_count().
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count)
+{
+ int ret;
+
+ if (!lttpr_count)
+ return 0;
+
+ /*
+ * See DP Standard v2.0 3.6.6.1 about the explicit disabling of
+ * non-transparent mode and the disable->enable non-transparent mode
+ * sequence.
+ */
+ ret = drm_dp_lttpr_set_transparent_mode(aux, true);
+ if (ret)
+ return ret;
+
+ if (lttpr_count < 0)
+ return -ENODEV;
+
+ if (drm_dp_lttpr_set_transparent_mode(aux, false)) {
+ /*
+ * Roll-back to transparent mode if setting non-transparent
+ * mode has failed
+ */
+ drm_dp_lttpr_set_transparent_mode(aux, true);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_init);
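A hypothetical probe-time sequence (not part of this patch) built around the existing drm_dp_lttpr_count() helper mentioned in the kernel-doc; lttpr_common_caps is assumed to hold the DP_LTTPR_COMMON_CAP_SIZE block already read by the driver:

	lttpr_count = drm_dp_lttpr_count(lttpr_common_caps);
	ret = drm_dp_lttpr_init(aux, lttpr_count);
	if (ret)
		lttpr_count = 0;	/* treat the LTTPRs as transparent */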
+
+/**
* drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs
* @caps: LTTPR common capabilities
*
@@ -2876,12 +3230,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -2889,22 +3243,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -2931,15 +3285,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3146,8 +3500,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3171,7 +3525,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3302,16 +3656,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3326,7 +3677,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3355,7 +3706,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3390,11 +3741,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3420,7 +3767,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
- if (ret < 0)
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
@@ -3436,13 +3783,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3457,7 +3798,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3466,11 +3807,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3485,7 +3822,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3510,7 +3847,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3539,7 +3876,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3674,7 +4011,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3685,11 +4022,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3701,13 +4034,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3723,15 +4050,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3748,21 +4071,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
- if (ret < 0)
- return ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
@@ -3778,7 +4097,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3787,11 +4106,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
@@ -3807,28 +4122,38 @@ EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
* Returns: %0 on success, negative error code on failure
*/
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level)
+ u32 level)
{
int ret;
- u8 buf[2] = { 0 };
+ unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
+ u8 buf[3] = { 0 };
+ size_t len = 2;
/* The panel uses the PWM for controlling brightness levels */
- if (!bl->aux_set)
+ if (!(bl->aux_set || bl->luminance_set))
return 0;
- if (bl->lsb_reg_used) {
+ if (bl->luminance_set) {
+ level = level * 1000;
+ level &= 0xffffff;
+ buf[0] = (level & 0x0000ff);
+ buf[1] = (level & 0x00ff00) >> 8;
+ buf[2] = (level & 0xff0000) >> 16;
+ offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ len = 3;
+ } else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
} else {
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, len);
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
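A worked example of the new millinits conversion (illustrative, not part of this patch): a requested level of 350 nits becomes 350 * 1000 = 350000 = 0x055730, so the bytes written to DP_EDP_PANEL_TARGET_LUMINANCE_VALUE are:

	buf[0] = 0x30;	/* bits 7:0 */
	buf[1] = 0x57;	/* bits 15:8 */
	buf[2] = 0x05;	/* bits 23:16 */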
@@ -3846,22 +4171,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -3886,7 +4211,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
* Returns: %0 on success, negative error code on failure.
*/
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- const u16 level)
+ const u32 level)
{
int ret;
u8 dpcd_buf;
@@ -3896,16 +4221,20 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
else
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
+ if (bl->luminance_set)
+ dpcd_buf |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -3913,8 +4242,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -3964,22 +4293,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
{
int fxp, fxp_min, fxp_max, fxp_actual, f = 1;
int ret;
- u8 pn, pn_min, pn_max;
+ u8 pn, pn_min, pn_max, bit_count;
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
}
- pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ if (unlikely(pn_min > pn_max)) {
+ drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n",
+ aux->name, pn_min, pn_max);
+ return -EINVAL;
+ }
+
+ /*
+ * Per VESA eDP Spec v1.4b, section 3.3.10.2:
+ * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN,
+ * the sink must use the MIN value as the effective PWM bit count.
+ * Clamp the reported value to the [MIN, MAX] capability range to ensure
+ * correct brightness scaling on compliant eDP panels.
+ * Only apply this logic if the reported [MIN, MAX] range is valid per the spec.
+ */
+ pn = bit_count;
+ if (bit_count < pn_min)
+ pn = clamp(bit_count, pn_min, pn_max);
+
bl->max = (1 << pn) - 1;
- if (!driver_pwm_freq_hz)
+ if (!driver_pwm_freq_hz) {
+ if (pn != bit_count)
+ goto bit_count_write_back;
+
return 0;
+ }
/*
* Set PWM Frequency divider to match desired frequency provided by the driver.
@@ -4003,21 +4371,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
- aux->name, ret);
- return 0;
- }
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
- aux->name, ret);
- return 0;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
/* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
@@ -4035,12 +4388,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+bit_count_write_back:
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
}
+
+ if (!driver_pwm_freq_hz)
+ return 0;
+
bl->pwmgen_bit_count = pn;
bl->max = (1 << pn) - 1;
@@ -4058,11 +4416,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 *current_mode)
{
int ret;
- u8 buf[2];
+ u8 buf[3];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4075,17 +4433,37 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
- aux->name, ret);
- return ret < 0 ? ret : -EIO;
- }
+ if (bl->luminance_set) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
- if (bl->lsb_reg_used)
- return (buf[0] << 8) | buf[1];
- else
- return buf[0];
+ /*
+ * In case luminance is set we want to send the value back in nits, but
+ * since DP_EDP_PANEL_TARGET_LUMINANCE_VALUE stores values in millinits
+ * we need to divide by 1000.
+ */
+ return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000;
+ } else {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ buf, size);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
+
+ if (bl->lsb_reg_used)
+ return (buf[0] << 8) | buf[1];
+ else
+ return buf[0];
+ }
}
/*
@@ -4100,10 +4478,12 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
* interface.
* @aux: The DP aux device to use for probing
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
+ * @max_luminance: Maximum luminance level, used when @need_luminance is true
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
* @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
+ * @need_luminance: Tells us if we want to control the backlight using luminance values
*
* Initializes a &drm_edp_backlight_info struct by probing @aux for it's backlight capabilities,
* along with also probing the current and maximum supported brightness levels.
@@ -4115,8 +4495,9 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
*/
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode)
+ u32 *current_level, u8 *current_mode, bool need_luminance)
{
int ret;
@@ -4126,18 +4507,26 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl
bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
+ if ((edp_dpcd[0] & DP_EDP_15) && edp_dpcd[3] &
+ (DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) && need_luminance)
+ bl->luminance_set = true;
/* Sanity check caps */
- if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP) &&
+ !bl->luminance_set) {
drm_dbg_kms(aux->drm_dev,
- "%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
+ "%s: Panel does not support AUX, PWM or luminance-based brightness control. Aborting\n",
aux->name);
return -EINVAL;
}
- ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
- if (ret < 0)
- return ret;
+ if (bl->luminance_set) {
+ bl->max = max_luminance;
+ } else {
+ ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
+ if (ret < 0)
+ return ret;
+ }
ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
@@ -4216,7 +4605,7 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
{
struct dp_aux_backlight *bl;
struct backlight_properties props = { 0 };
- u16 current_level;
+ u32 current_level;
u8 current_mode;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
int ret;
@@ -4224,8 +4613,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
@@ -4240,8 +4629,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
bl->aux = aux;
- ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(aux, &bl->info, 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
@@ -4266,8 +4655,9 @@ EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
-static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
- int symbol_size, bool is_mst)
+static int drm_dp_link_data_symbol_cycles(int lane_count, int pixels,
+ int bpp_x16, int symbol_size,
+ bool is_mst)
{
int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
int align = is_mst ? 4 / lane_count : 1;
@@ -4275,22 +4665,42 @@ static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
return ALIGN(cycles, align);
}
-static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
- int bpp_x16, int symbol_size, bool is_mst)
+/**
+ * drm_dp_link_symbol_cycles - calculate the link symbol count with/without DSC
+ * @lane_count: DP link lane count
+ * @pixels: number of pixels in a scanline
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
+ * @bpp_x16: bits per pixel in .4 binary fixed format
+ * @symbol_size: DP symbol size
+ * @is_mst: %true for MST and %false for SST
+ *
+ * Calculate the link symbol cycles for both DSC (@dsc_slice_count != 0) and
+ * non-DSC case (@dsc_slice_count == 0) and return the count.
+ */
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
{
+ int slice_count = dsc_slice_count ? : 1;
int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
- int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
- bpp_x16, symbol_size, is_mst);
- int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+ int slice_data_cycles = drm_dp_link_data_symbol_cycles(lane_count,
+ slice_pixels,
+ bpp_x16,
+ symbol_size,
+ is_mst);
+ int slice_eoc_cycles = 0;
+
+ if (dsc_slice_count)
+ slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
return slice_count * (slice_data_cycles + slice_eoc_cycles);
}
+EXPORT_SYMBOL(drm_dp_link_symbol_cycles);
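An illustrative call (not part of this patch): symbol cycles for one scanline of a hypothetical 4-lane 8b/10b SST stream, 1920 active pixels at 24 bpp, without DSC:

	int cycles = drm_dp_link_symbol_cycles(4, 1920, 0 /* no DSC */,
					       24 << 4 /* bpp_x16 */,
					       8 /* 8b/10b symbol size */,
					       false /* SST */);
	/* 1920 * 24 / (8 * 4) = 1440 symbol cycles per lane */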
/**
* drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
* @lane_count: DP link lane count
* @hactive: pixel count of the active period in one scanline of the stream
- * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
* @bpp_x16: bits per pixel in .4 binary fixed point
* @flags: DRM_DP_OVERHEAD_x flags
*
@@ -4304,7 +4714,7 @@ static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_c
* as well as the stream's
* - @hactive timing
* - @bpp_x16 color depth
- * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
+ * - compression mode (@dsc_slice_count != 0)
* Note that this overhead doesn't account for the 8b/10b, 128b/132b
* channel coding efficiency, for that see
* @drm_dp_link_bw_channel_coding_efficiency().
@@ -4359,15 +4769,10 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
(flags & DRM_DP_BW_OVERHEAD_FEC));
- if (flags & DRM_DP_BW_OVERHEAD_DSC)
- symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
- dsc_slice_count,
- bpp_x16, symbol_size,
- is_mst);
- else
- symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
- bpp_x16, symbol_size,
- is_mst);
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
overhead * 16),
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index f8cd094efa3c..64e5c176d5cc 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -23,6 +23,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -171,18 +172,30 @@ static const char *drm_dp_mst_sideband_tx_state_str(int state)
return sideband_reason_str[state];
}
+static inline u8
+drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)
+{
+ int idx = (lct / 2) - 1;
+ int shift = (lct % 2) ? 0 : 4;
+ u8 ufp_num;
+
+ /* lct == 1 is the mst_primary, whose RAD is unset */
+ if (lct == 1)
+ return 0;
+
+ ufp_num = (rad[idx] >> shift) & 0xf;
+
+ return ufp_num;
+}
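A worked example of the nibble decoding (not part of this patch): for a hypothetical rad[] = { 0x21 } with lct = 3, the UFP numbers along the path are 2 and then 1:

	static const u8 example_rad[8] = { 0x21 };

	drm_dp_mst_get_ufp_num_at_lct_from_rad(1, example_rad);	/* 0, mst_primary */
	drm_dp_mst_get_ufp_num_at_lct_from_rad(2, example_rad);	/* 2, high nibble of rad[0] */
	drm_dp_mst_get_ufp_num_at_lct_from_rad(3, example_rad);	/* 1, low nibble of rad[0] */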
+
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
- u8 unpacked_rad[16];
+ u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) {
- if (i % 2)
- unpacked_rad[i] = rad[i / 2] >> 4;
- else
- unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
- }
+ for (i = 0; i < lct; i++)
+ unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
/* TODO: Eventually add something to printk so we can format the rad
* like this: 1.2.3
@@ -2180,24 +2193,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2544,9 +2553,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
if (!mstb)
goto out;
- for (i = 0; i < lct - 1; i++) {
- int shift = (i % 2) ? 0 : 4;
- int port_num = (rad[i / 2] >> shift) & 0xf;
+ for (i = 1; i < lct; i++) {
+ int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -2733,14 +2741,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3572,8 +3579,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3584,17 +3590,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
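Callers now pass only the link parameters. A hedged usage sketch with an 8.1 Gbps x4 configuration (810000 in the 10 kbit/s units documented above); consuming the 20.12 result with dfixed_trunc() is an assumption about how a caller might use it, not something taken from this patch.

#include <drm/drm_fixed.h>
#include <drm/display/drm_dp_mst_helper.h>

static u32 example_vc_payload_bw_int_part(void)
{
	/* 8.1 Gbps per lane, 4 lanes; link_rate is in 10 kbit/s units. */
	fixed20_12 bw = drm_dp_get_vc_payload_bw(810000, 4);

	/* Integer part of the per-timeslot bandwidth (20.12 fixed point). */
	return dfixed_trunc(bw);
}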
@@ -3619,7 +3620,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3674,10 +3675,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3692,7 +3693,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3758,8 +3759,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3808,18 +3809,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3878,8 +3879,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3917,9 +3918,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4031,6 +4032,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4061,8 +4078,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4144,10 +4165,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4156,16 +4178,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4180,9 +4192,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
@@ -4868,9 +4877,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4959,23 +4968,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6099,14 +6109,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
/* Endpoint decompression with DP-to-DP peer device */
@@ -6144,8 +6154,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6167,11 +6177,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 48b2df120086..43f13a7c79b9 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>
@@ -222,7 +223,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +914,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +922,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1040,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1053,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1096,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1197,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1216,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1301,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
@@ -1896,8 +1897,8 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
*
* Creates a DP tunnel manager for @dev.
*
- * Returns a pointer to the tunnel manager if created successfully or NULL in
- * case of an error.
+ * Returns a pointer to the tunnel manager if created successfully or error
+ * pointer in case of failure.
*/
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
@@ -1907,7 +1908,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mgr->dev = dev;
init_waitqueue_head(&mgr->bw_req_queue);
@@ -1916,18 +1917,18 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
if (!mgr->groups) {
kfree(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
- ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
#endif
for (i = 0; i < max_group_count; i++) {
if (!init_group(mgr, &mgr->groups[i])) {
destroy_mgr(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
mgr->group_count++;
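With drm_dp_tunnel_mgr_create() now returning an error pointer, callers trade the NULL check for IS_ERR()/PTR_ERR(). A hedged caller sketch; the surrounding driver structure and the group count of 3 are illustrative.

#include <linux/err.h>
#include <drm/drm_device.h>
#include <drm/display/drm_dp_tunnel.h>

static int example_init_dp_tunneling(struct drm_device *dev,
				     struct drm_dp_tunnel_mgr **out_mgr)
{
	struct drm_dp_tunnel_mgr *mgr;

	mgr = drm_dp_tunnel_mgr_create(dev, 3);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	*out_mgr = mgr;
	return 0;
}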
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 6900f4dac520..05996c526a8a 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -6,6 +6,7 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index 05afc9f0bdd6..7d78b02c1446 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -3,6 +3,7 @@
* Copyright (c) 2024 Linaro Ltd
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -103,7 +104,8 @@ static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
connector->hdmi_audio.plugged_cb = fn;
connector->hdmi_audio.plugged_cb_dev = codec_dev;
- fn(codec_dev, connector->hdmi_audio.last_state);
+ if (fn)
+ fn(codec_dev, connector->hdmi_audio.last_state);
mutex_unlock(&connector->hdmi_audio.lock);
@@ -142,6 +144,7 @@ static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
* @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
* @funcs: callbacks for this HDMI Codec
* @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @i2s_formats: set of I2S formats (use 0 for a bus-specific set)
* @spdif_playback: set if HDMI codec has S/PDIF playback port
* @dai_port: sound DAI port, -1 if it is not enabled
*
@@ -154,6 +157,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int dai_port)
{
@@ -161,6 +165,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
.ops = &drm_connector_hdmi_audio_ops,
.max_i2s_channels = max_i2s_playback_channels,
.i2s = !!max_i2s_playback_channels,
+ .i2s_formats = i2s_formats,
.spdif = spdif_playback,
.no_i2s_capture = true,
.no_spdif_capture = true,
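The new i2s_formats argument sits between max_i2s_playback_channels and spdif_playback; passing 0 keeps the bus-specific default set mentioned in the kernel-doc. A hedged call-site sketch; the ops table and the values used are hypothetical.

#include <drm/drm_connector.h>
#include <drm/display/drm_hdmi_audio_helper.h>

/* Hypothetical ops table; a real driver fills in its audio hooks here. */
static const struct drm_connector_hdmi_audio_funcs example_audio_funcs;

static int example_register_hdmi_audio(struct drm_connector *connector,
				       struct device *codec_parent)
{
	return drm_connector_hdmi_audio_init(connector, codec_parent,
					     &example_audio_funcs,
					     8,     /* max I2S playback channels */
					     0,     /* i2s_formats: bus-specific default */
					     false, /* no S/PDIF playback */
					     -1);   /* DAI port not enabled */
}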
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
new file mode 100644
index 000000000000..3651ad0f76e0
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+
+struct drm_connector_hdmi_cec_data {
+ struct cec_adapter *adapter;
+ const struct drm_connector_hdmi_cec_funcs *funcs;
+};
+
+static int drm_connector_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->enable(connector, enable);
+}
+
+static int drm_connector_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->log_addr(connector, logical_addr);
+}
+
+static int drm_connector_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->transmit(connector, attempts, signal_free_time, msg);
+}
+
+static const struct cec_adap_ops drm_connector_hdmi_cec_adap_ops = {
+ .adap_enable = drm_connector_hdmi_cec_adap_enable,
+ .adap_log_addr = drm_connector_hdmi_cec_adap_log_addr,
+ .adap_transmit = drm_connector_hdmi_cec_adap_transmit,
+};
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_invalidate(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_phys_addr_invalidate(data->adapter);
+}
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_s_phys_addr(data->adapter, addr, false);
+}
+
+static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_unregister_adapter(data->adapter);
+
+ if (data->funcs->uninit)
+ data->funcs->uninit(connector);
+
+ kfree(data);
+ connector->cec.data = NULL;
+}
+
+static struct drm_connector_cec_funcs drm_connector_hdmi_cec_adapter_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_adapter_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_adapter_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev)
+{
+ struct drm_connector_hdmi_cec_data *data;
+ struct cec_connector_info conn_info;
+ struct cec_adapter *cec_adap;
+ int ret;
+
+ if (!funcs->init || !funcs->enable || !funcs->log_addr || !funcs->transmit)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->funcs = funcs;
+
+ cec_adap = cec_allocate_adapter(&drm_connector_hdmi_cec_adap_ops, connector, name,
+ CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
+ available_las ? : CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec_adap);
+ if (ret < 0)
+ goto err_free;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(cec_adap, &conn_info);
+
+ data->adapter = cec_adap;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = data;
+ connector->cec.funcs = &drm_connector_hdmi_cec_adapter_funcs;
+
+ ret = funcs->init(connector);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ /*
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
+ *
+ * However, the CEC framework cleans up the CEC adapter only when the
+ * last user has closed its file descriptor, so we don't need to handle
+ * it in DRM.
+ *
+ * Before that, the CEC framework makes sure that even if userspace
+ * still holds the CEC device open, all calls will be shortcut via
+ * cec_is_registered(), making sure that there is no access to the
+ * freed memory.
+ */
+ ret = cec_register_adapter(cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_adapter_unregister,
+ connector);
+
+err_delete_adapter:
+ cec_delete_adapter(cec_adap);
+
+ connector->cec.data = NULL;
+
+ mutex_unlock(&connector->cec.mutex);
+
+err_free:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_register);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_received_msg(data->adapter, msg);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_received_msg);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_attempt_done(data->adapter, status);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_attempt_done);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_done(data->adapter, status,
+ arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_done);
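A hedged sketch of a connector driver hooking into the new helper. Registration requires .init, .enable, .log_addr and .transmit (a funcs table missing any of them is rejected); callback prototypes below are inferred from the adapter ops above, and every name outside the helper API is hypothetical.

#include <drm/drm_connector.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <media/cec.h>

/* Hypothetical driver callbacks; a real driver programs its CEC block here. */
static int example_cec_init(struct drm_connector *connector) { return 0; }
static int example_cec_enable(struct drm_connector *connector, bool enable) { return 0; }
static int example_cec_log_addr(struct drm_connector *connector, u8 logical_addr) { return 0; }
static int example_cec_transmit(struct drm_connector *connector, u8 attempts,
				u32 signal_free_time, struct cec_msg *msg) { return 0; }

static const struct drm_connector_hdmi_cec_funcs example_cec_funcs = {
	.init = example_cec_init,
	.enable = example_cec_enable,
	.log_addr = example_cec_log_addr,
	.transmit = example_cec_transmit,
};

static int example_register_cec(struct drm_connector *connector, struct device *dev)
{
	/* available_las of 0 falls back to CEC_MAX_LOG_ADDRS, as in the helper above. */
	return drmm_connector_hdmi_cec_register(connector, &example_cec_funcs,
						"example-cec", 0, dev);
}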
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
new file mode 100644
index 000000000000..31b8e4a93e24
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_invalidate(struct drm_connector *connector)
+{
+ cec_notifier_phys_addr_invalidate(connector->cec.data);
+}
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ cec_notifier_set_phys_addr(connector->cec.data, addr);
+}
+
+static void drm_connector_hdmi_cec_notifier_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+
+ cec_notifier_conn_unregister(connector->cec.data);
+ connector->cec.data = NULL;
+}
+
+static const struct drm_connector_cec_funcs drm_connector_cec_notifier_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_notifier_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_notifier_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(dev, port_name, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = notifier;
+ connector->cec.funcs = &drm_connector_cec_notifier_funcs;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_notifier_unregister,
+ connector);
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_notifier_register);
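For hardware whose CEC controller lives outside the connector driver, the notifier flavour only forwards physical-address updates. A hedged call-site sketch; passing a NULL port name is an assumption, suitable only for a single-port device.

#include <drm/drm_connector.h>
#include <drm/display/drm_hdmi_cec_helper.h>

static int example_register_cec_notifier(struct drm_connector *connector,
					 struct device *dev)
{
	/* NULL port name: assumed acceptable when the device has one port */
	return drmm_connector_hdmi_cec_notifier_register(connector, NULL, dev);
}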
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 74dd4d01dd9b..a237dc55805d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -44,7 +45,7 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf))
+ connector->display_info.hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
@@ -256,3 +257,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
}
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
+
+struct drm_hdmi_acr_n_cts_entry {
+ unsigned int n;
+ unsigned int cts;
+};
+
+struct drm_hdmi_acr_data {
+ unsigned long tmds_clock_khz;
+ struct drm_hdmi_acr_n_cts_entry n_cts_32k,
+ n_cts_44k1,
+ n_cts_48k;
+};
+
+static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
+ {
+ /* "Other" entry */
+ .n_cts_32k = { .n = 4096, },
+ .n_cts_44k1 = { .n = 6272, },
+ .n_cts_48k = { .n = 6144, },
+ }, {
+ .tmds_clock_khz = 25175,
+ .n_cts_32k = { .n = 4576, .cts = 28125, },
+ .n_cts_44k1 = { .n = 7007, .cts = 31250, },
+ .n_cts_48k = { .n = 6864, .cts = 28125, },
+ }, {
+ .tmds_clock_khz = 25200,
+ .n_cts_32k = { .n = 4096, .cts = 25200, },
+ .n_cts_44k1 = { .n = 6272, .cts = 28000, },
+ .n_cts_48k = { .n = 6144, .cts = 25200, },
+ }, {
+ .tmds_clock_khz = 27000,
+ .n_cts_32k = { .n = 4096, .cts = 27000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30000, },
+ .n_cts_48k = { .n = 6144, .cts = 27000, },
+ }, {
+ .tmds_clock_khz = 27027,
+ .n_cts_32k = { .n = 4096, .cts = 27027, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30030, },
+ .n_cts_48k = { .n = 6144, .cts = 27027, },
+ }, {
+ .tmds_clock_khz = 54000,
+ .n_cts_32k = { .n = 4096, .cts = 54000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60000, },
+ .n_cts_48k = { .n = 6144, .cts = 54000, },
+ }, {
+ .tmds_clock_khz = 54054,
+ .n_cts_32k = { .n = 4096, .cts = 54054, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60060, },
+ .n_cts_48k = { .n = 6144, .cts = 54054, },
+ }, {
+ .tmds_clock_khz = 74176,
+ .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
+ .n_cts_44k1 = { .n = 17836, .cts = 234375, },
+ .n_cts_48k = { .n = 11648, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 74250,
+ .n_cts_32k = { .n = 4096, .cts = 74250, },
+ .n_cts_44k1 = { .n = 6272, .cts = 82500, },
+ .n_cts_48k = { .n = 6144, .cts = 74250, },
+ }, {
+ .tmds_clock_khz = 148352,
+ .n_cts_32k = { .n = 11648, .cts = 421875, },
+ .n_cts_44k1 = { .n = 8918, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 148500,
+ .n_cts_32k = { .n = 4096, .cts = 148500, },
+ .n_cts_44k1 = { .n = 6272, .cts = 165000, },
+ .n_cts_48k = { .n = 6144, .cts = 148500, },
+ }, {
+ .tmds_clock_khz = 296703,
+ .n_cts_32k = { .n = 5824, .cts = 421875, },
+ .n_cts_44k1 = { .n = 4459, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 281250, },
+ }, {
+ .tmds_clock_khz = 297000,
+ .n_cts_32k = { .n = 3072, .cts = 222750, },
+ .n_cts_44k1 = { .n = 4704, .cts = 247500, },
+ .n_cts_48k = { .n = 5120, .cts = 247500, },
+ }, {
+ .tmds_clock_khz = 593407,
+ .n_cts_32k = { .n = 5824, .cts = 843750, },
+ .n_cts_44k1 = { .n = 8918, .cts = 937500, },
+ .n_cts_48k = { .n = 5824, .cts = 562500, },
+ }, {
+ .tmds_clock_khz = 594000,
+ .n_cts_32k = { .n = 3072, .cts = 445500, },
+ .n_cts_44k1 = { .n = 9408, .cts = 990000, },
+ .n_cts_48k = { .n = 6144, .cts = 594000, },
+ },
+};
+
+static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
+{
+ int i;
+
+ /* skip the "other" entry */
+ for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
+ if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
+ *
+ * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
+ * @sample_rate: audio sample rate
+ * @out_n: a pointer to write the N value
+ * @out_cts: a pointer to write the CTS value
+ *
+ * Get the N and CTS values (either by calculating them or by returning data
+ * from the tables). This follows the HDMI 1.4b Section 7.2 "Audio Sample Clock
+ * Capture and Regeneration".
+ *
+ * Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6
+ * on how to select Fs for non-L-PCM formats.
+ */
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts)
+{
+ /* be a bit more tolerant, especially for the 1.001 entries */
+ unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
+ const struct drm_hdmi_acr_n_cts_entry *entry;
+ unsigned int n, cts, mult;
+ int tmds_idx;
+
+ tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
+
+ /*
+ * Don't change the order: 192 kHz is divisible by both 48 kHz and
+ * 32 kHz, but it should use the 48 kHz entry.
+ */
+ if (sample_rate % 48000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
+ mult = sample_rate / 48000;
+ } else if (sample_rate % 44100 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
+ mult = sample_rate / 44100;
+ } else if (sample_rate % 32000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
+ mult = sample_rate / 32000;
+ } else {
+ entry = NULL;
+ }
+
+ if (entry) {
+ n = entry->n * mult;
+ cts = entry->cts;
+ } else {
+ /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
+ n = 128 * sample_rate / 1000;
+ cts = 0;
+ }
+
+ if (!cts)
+ cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
+ 128 * sample_rate);
+
+ *out_n = n;
+ *out_cts = cts;
+}
+EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
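A short usage sketch of the new helper, using values that can be read straight off the table above; only the wrapper function name is illustrative.

#include <drm/display/drm_hdmi_helper.h>

static void example_acr_values(void)
{
	unsigned int n, cts;

	/* 1080p60 RGB 8 bpc: TMDS character rate 148.5 MHz, 48 kHz audio. */
	drm_hdmi_acr_get_n_cts(148500000ULL, 48000, &n, &cts);
	/* From the 148500 entry above: n == 6144, cts == 148500. */

	/* 96 kHz is not listed directly: the 48 kHz entry is used, N doubled. */
	drm_hdmi_acr_get_n_cts(148500000ULL, 96000, &n, &cts);
	/* n == 12288, cts == 148500. */
}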
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index cfc2aaee1da0..a561f124be99 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -1,15 +1,311 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions contain an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * It contains TMDS character rate computation, automatic selection of
+ * output formats, infoframes generation, etc.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers have unit testing and can be tested using kunit with:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -115,6 +411,11 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return false;
}
+ if (drm_mode_is_420_only(info, mode) && format != HDMI_COLORSPACE_YUV420) {
+ drm_dbg_kms(dev, "Mode can be only supported in YUV420 format.\n");
+ return false;
+ }
+
switch (format) {
case HDMI_COLORSPACE_RGB:
drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
@@ -145,9 +446,36 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return true;
case HDMI_COLORSPACE_YUV420:
- /* TODO: YUV420 is unsupported at the moment. */
- drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
- return false;
+ drm_dbg_kms(dev, "YUV420 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR420)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV420.\n");
+ return false;
+ }
+
+ if (!drm_mode_is_420(info, mode)) {
+ drm_dbg_kms(dev, "Mode cannot be supported in YUV420 format.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ if (bpc == 16 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) {
+ drm_dbg_kms(dev, "16 BPC but sink doesn't support Deep Color 48.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV420 format supported in that configuration.\n");
+
+ return true;
case HDMI_COLORSPACE_YUV422:
drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
@@ -253,8 +581,9 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret;
- drm_dbg_kms(dev, "Trying %s output format\n",
- drm_hdmi_connector_get_output_format_name(fmt));
+ drm_dbg_kms(dev, "Trying %s output format with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
@@ -271,7 +600,7 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
return false;
}
- drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_dbg_kms(dev, "%s output format supported with %u bpc (TMDS char rate: %llu Hz)\n",
drm_hdmi_connector_get_output_format_name(fmt),
bpc, conn_state->hdmi.tmds_char_rate);
@@ -279,23 +608,35 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
}
static int
-hdmi_compute_format(const struct drm_connector *connector,
- struct drm_connector_state *conn_state,
- const struct drm_display_mode *mode,
- unsigned int bpc)
+hdmi_compute_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int max_bpc, enum hdmi_colorspace fmt)
{
struct drm_device *dev = connector->dev;
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ ret = hdmi_try_format_bpc(connector, conn_state, mode, bpc, fmt);
+ if (!ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+ conn_state->hdmi.output_format = fmt;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
- /*
- * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
- * devices, for modes that only support YCbCr420.
- */
- if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
- conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
return 0;
}
- drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+ drm_dbg_kms(dev, "Failed. %s output format not supported for any bpc count.\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
return -EINVAL;
}
@@ -305,33 +646,29 @@ hdmi_compute_config(const struct drm_connector *connector,
struct drm_connector_state *conn_state,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
unsigned int max_bpc = clamp_t(unsigned int,
conn_state->max_bpc,
8, connector->max_bpc);
- unsigned int bpc;
int ret;
- for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
- drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
-
- ret = hdmi_compute_format(connector, conn_state, mode, bpc);
- if (ret)
- continue;
-
- conn_state->hdmi.output_bpc = bpc;
-
- drm_dbg_kms(dev,
- "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
- mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
- conn_state->hdmi.output_bpc,
- drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
- conn_state->hdmi.tmds_char_rate);
-
- return 0;
+ ret = hdmi_compute_format_bpc(connector, conn_state, mode, max_bpc,
+ HDMI_COLORSPACE_RGB);
+ if (ret) {
+ if (connector->ycbcr_420_allowed) {
+ ret = hdmi_compute_format_bpc(connector, conn_state,
+ mode, max_bpc,
+ HDMI_COLORSPACE_YUV420);
+ if (ret)
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format doesn't work.\n");
+ } else {
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format not allowed for connector.\n");
+ ret = -EINVAL;
+ }
}
- return -EINVAL;
+ return ret;
}
static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
@@ -503,12 +840,15 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
connector_state_get_mode(new_conn_state);
int ret;
- new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+ if (!new_conn_state->crtc || !new_conn_state->best_encoder)
+ return 0;
ret = hdmi_compute_config(connector, new_conn_state, mode);
if (ret)
return ret;
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
ret = hdmi_generate_infoframes(connector, new_conn_state);
if (ret)
return ret;
@@ -539,7 +879,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
*/
enum drm_mode_status
drm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
unsigned long long clock;
@@ -786,8 +1126,11 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
const struct drm_edid *drm_edid;
if (status == connector_status_disconnected) {
- // TODO: also handle CEC and scramber, HDMI sink disconnected.
+ // TODO: also handle scrambler, HDMI sink disconnected.
drm_connector_hdmi_audio_plugged_notify(connector, false);
+ drm_edid_connector_update(connector, NULL);
+ drm_connector_cec_phys_addr_invalidate(connector);
+ return;
}
if (connector->hdmi.funcs->read_edid)
@@ -800,8 +1143,9 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
drm_edid_free(drm_edid);
if (status == connector_status_connected) {
- // TODO: also handle CEC and scramber, HDMI sink is now connected.
+ // TODO: also handle scrambler, HDMI sink is now connected.
drm_connector_hdmi_audio_plugged_notify(connector, true);
+ drm_connector_cec_phys_addr_set(connector);
}
}
@@ -811,7 +1155,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 6d2f244e5830..df878aad4a36 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..67e095e398a3 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,7 +26,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-
+#include <linux/export.h>
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
@@ -42,6 +42,7 @@
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_colorop.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state)
kfree(state->connectors);
kfree(state->crtcs);
kfree(state->planes);
+ kfree(state->colorops);
kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
+ state->colorops = kcalloc(dev->mode_config.num_colorop,
+ sizeof(*state->colorops), GFP_KERNEL);
+ if (!state->colorops)
+ goto fail;
/*
* Because drm_atomic_state can be committed asynchronously we need our
@@ -200,6 +206,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
drm_dbg_atomic(dev, "Clearing atomic state %p\n", state);
+ state->checked = false;
+
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i].ptr;
@@ -207,9 +215,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connectors[i].state);
+ state->connectors[i].state_to_destroy);
state->connectors[i].ptr = NULL;
- state->connectors[i].state = NULL;
+ state->connectors[i].state_to_destroy = NULL;
state->connectors[i].old_state = NULL;
state->connectors[i].new_state = NULL;
drm_connector_put(connector);
@@ -222,10 +230,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtcs[i].state);
+ state->crtcs[i].state_to_destroy);
state->crtcs[i].ptr = NULL;
- state->crtcs[i].state = NULL;
+ state->crtcs[i].state_to_destroy = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
@@ -242,20 +250,34 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->planes[i].state);
+ state->planes[i].state_to_destroy);
state->planes[i].ptr = NULL;
- state->planes[i].state = NULL;
+ state->planes[i].state_to_destroy = NULL;
state->planes[i].old_state = NULL;
state->planes[i].new_state = NULL;
}
+ for (i = 0; i < config->num_colorop; i++) {
+ struct drm_colorop *colorop = state->colorops[i].ptr;
+
+ if (!colorop)
+ continue;
+
+ drm_colorop_atomic_destroy_state(colorop,
+ state->colorops[i].state);
+ state->colorops[i].ptr = NULL;
+ state->colorops[i].state = NULL;
+ state->colorops[i].old_state = NULL;
+ state->colorops[i].new_state = NULL;
+ }
+
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
obj->funcs->atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
state->private_objs[i].ptr = NULL;
- state->private_objs[i].state = NULL;
+ state->private_objs[i].state_to_destroy = NULL;
state->private_objs[i].old_state = NULL;
state->private_objs[i].new_state = NULL;
}
@@ -348,8 +370,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
@@ -361,7 +384,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtcs[index].state = crtc_state;
+ state->crtcs[index].state_to_destroy = crtc_state;
state->crtcs[index].old_state = crtc->state;
state->crtcs[index].new_state = crtc_state;
state->crtcs[index].ptr = crtc;
@@ -480,8 +503,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
}
if (state->crtc)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
if (writeback_job->fb && !crtc_state->active) {
drm_dbg_atomic(connector->dev,
@@ -528,13 +551,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane_state *plane_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
/* the legacy pointers should never be set */
WARN_ON(plane->fb);
WARN_ON(plane->old_fb);
WARN_ON(plane->crtc);
- plane_state = drm_atomic_get_existing_plane_state(state, plane);
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -546,7 +570,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->planes[index].state = plane_state;
+ state->planes[index].state_to_destroy = plane_state;
state->planes[index].ptr = plane;
state->planes[index].old_state = plane->state;
state->planes[index].new_state = plane_state;
@@ -568,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
+/**
+ * drm_atomic_get_colorop_state - get colorop state
+ * @state: global atomic state object
+ * @colorop: colorop to get state object for
+ *
+ * This function returns the colorop state for the given colorop, allocating it
+ * if needed. It will also grab the relevant plane lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_colorop_state *
+drm_atomic_get_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
+{
+ int ret, index = drm_colorop_index(colorop);
+ struct drm_colorop_state *colorop_state;
+
+ WARN_ON(!state->acquire_ctx);
+
+ colorop_state = drm_atomic_get_new_colorop_state(state, colorop);
+ if (colorop_state)
+ return colorop_state;
+
+ ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop);
+ if (!colorop_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->colorops[index].state = colorop_state;
+ state->colorops[index].ptr = colorop;
+ state->colorops[index].old_state = colorop->state;
+ state->colorops[index].new_state = colorop_state;
+ colorop_state->state = state;
+
+ drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n",
+ colorop->base.id, colorop->type, colorop_state, state);
+
+ return colorop_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_colorop_state);
+
static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
const struct drm_plane_state *new_plane_state)
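The kernel-doc above spells out the usual atomic-state contract: the returned pointer may encode -EDEADLK, in which case the whole atomic sequence has to back off and restart. A hedged sketch of a driver adding colorop states inside an atomic check hook; the chain walk via colorop->next follows the print helper elsewhere in this patch, everything else is illustrative.

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_colorop.h>

/*
 * Sketch only: add the state of every colorop chained off a plane's color
 * pipeline to the atomic commit.
 */
static int example_add_pipeline_states(struct drm_atomic_state *state,
				       struct drm_colorop *first)
{
	struct drm_colorop *colorop;

	for (colorop = first; colorop; colorop = colorop->next) {
		struct drm_colorop_state *colorop_state;

		colorop_state = drm_atomic_get_colorop_state(state, colorop);
		if (IS_ERR(colorop_state))
			return PTR_ERR(colorop_state);	/* may be -EDEADLK */
	}

	return 0;
}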
@@ -707,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
return 0;
}
+static void drm_atomic_colorop_print_state(struct drm_printer *p,
+ const struct drm_colorop_state *state)
+{
+ struct drm_colorop *colorop = state->colorop;
+
+ drm_printf(p, "colorop[%u]:\n", colorop->base.id);
+ drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type));
+ if (colorop->bypass_property)
+ drm_printf(p, "\tbypass=%u\n", state->bypass);
+
+ switch (colorop->type) {
+ case DRM_COLOROP_1D_CURVE:
+ drm_printf(p, "\tcurve_1d_type=%s\n",
+ drm_get_colorop_curve_1d_type_name(state->curve_1d_type));
+ break;
+ case DRM_COLOROP_1D_LUT:
+ drm_printf(p, "\tsize=%d\n", colorop->size);
+ drm_printf(p, "\tinterpolation=%s\n",
+ drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation));
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ case DRM_COLOROP_CTM_3X4:
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ case DRM_COLOROP_MULTIPLIER:
+ drm_printf(p, "\tmultiplier=%llu\n", state->multiplier);
+ break;
+ case DRM_COLOROP_3D_LUT:
+ drm_printf(p, "\tsize=%d\n", colorop->size);
+ drm_printf(p, "\tinterpolation=%s\n",
+ drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation));
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ default:
+ break;
+ }
+
+ drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0);
+}
+
static void drm_atomic_plane_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
@@ -728,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
-
+ drm_printf(p, "\tcolor-pipeline=%d\n",
+ state->color_pipeline ? state->color_pipeline->base.id : 0);
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
}
@@ -831,14 +945,17 @@ struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
- int index, num_objs, i, ret;
+ int index, num_objs, ret;
size_t size;
struct __drm_private_objs_state *arr;
struct drm_private_state *obj_state;
- for (i = 0; i < state->num_private_objs; i++)
- if (obj == state->private_objs[i].ptr)
- return state->private_objs[i].state;
+ WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, obj);
+ if (obj_state)
+ return obj_state;
ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
if (ret)
@@ -858,7 +975,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
if (!obj_state)
return ERR_PTR(-ENOMEM);
- state->private_objs[index].state = obj_state;
+ state->private_objs[index].state_to_destroy = obj_state;
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
@@ -933,6 +1050,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +1087,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +1111,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * an @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you do have access to it, prefer
+ * those helpers instead. This helper is typically useful in situations where you
+ * don't have access to the atomic state, like detect, link repair,
+ * threaded interrupt handlers, or hooks from other frameworks (ALSA,
+ * CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is EDEADLK, a deadlock has been detected and the
+ * sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
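A hedged usage sketch, not part of the patch: looking up the connector from a context without an atomic state (e.g. a threaded IRQ handler), retrying on -EDEADLK; handle_link_event() is a hypothetical driver function.

	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *connector;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	connector = drm_atomic_get_connector_for_encoder(encoder, &ctx);
	if (IS_ERR(connector) && PTR_ERR(connector) == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!IS_ERR(connector))
		handle_link_event(connector); /* hypothetical driver code */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);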
+
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
@@ -1070,6 +1246,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector_state *connector_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
@@ -1093,15 +1270,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->num_connector = alloc;
}
- if (state->connectors[index].state)
- return state->connectors[index].state;
+ connector_state = drm_atomic_get_new_connector_state(state, connector);
+ if (connector_state)
+ return connector_state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_get(connector);
- state->connectors[index].state = connector_state;
+ state->connectors[index].state_to_destroy = connector_state;
state->connectors[index].old_state = connector->state;
state->connectors[index].new_state = connector_state;
state->connectors[index].ptr = connector;
@@ -1249,7 +1427,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
if (!encoder)
return 0;
@@ -1258,7 +1435,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
"Adding all bridges for [encoder:%d:%s] to %p\n",
encoder->base.id, encoder->name, state);
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
if (!bridge->funcs->atomic_duplicate_state)
continue;
@@ -1379,6 +1556,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
+ * drm_atomic_add_affected_colorops - add colorops for plane
+ * @state: atomic state
+ * @plane: DRM plane
+ *
+ * This function walks the current configuration and adds all colorops
+ * currently used by @plane to the atomic configuration @state. This is useful
+ * when an atomic commit also needs to check all currently enabled colorop on
+ * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane
+ * to avoid special code to force-enable all colorops.
+ *
+ * Since acquiring a colorop state also acquires the w/w mutex of the plane that
+ * colorop belongs to (if there is one), adding all the colorop states for a
+ * plane does not reduce the parallelism of atomic updates.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_colorops(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ struct drm_colorop *colorop;
+ struct drm_colorop_state *colorop_state;
+
+ WARN_ON(!drm_atomic_get_new_plane_state(state, plane));
+
+ drm_dbg_atomic(plane->dev,
+ "Adding all current colorops for [PLANE:%d:%s] to %p\n",
+ plane->base.id, plane->name, state);
+
+ drm_for_each_colorop(colorop, plane->dev) {
+ if (colorop->plane != plane)
+ continue;
+
+ colorop_state = drm_atomic_get_colorop_state(state, colorop);
+ if (IS_ERR(colorop_state))
+ return PTR_ERR(colorop_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_colorops);
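A minimal sketch (not part of the patch), assuming a driver atomic_check that wants every active colorop of a plane re-validated:

	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_add_affected_colorops(state, plane);
	if (ret)
		return ret;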
+
+/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
*
@@ -1482,6 +1705,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
requested_crtc, affected_crtc);
}
+ state->checked = true;
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1774,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
bool take_locks)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_connector *connector;
@@ -1783,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (!drm_drv_uses_atomic_modeset(dev))
return;
+ list_for_each_entry(colorop, &config->colorop_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&colorop->plane->mutex, NULL);
+ drm_atomic_colorop_print_state(p, colorop->state);
+ if (take_locks)
+ drm_modeset_unlock(&colorop->plane->mutex);
+ }
+
list_for_each_entry(plane, &config->plane_list, head) {
if (take_locks)
drm_modeset_lock(&plane->mutex, NULL);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5186d2114a50..10adac9397cf 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,6 +25,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>
@@ -455,6 +456,7 @@ mode_fixup(struct drm_atomic_state *state)
ret = drm_atomic_bridge_chain_check(bridge,
new_crtc_state,
new_conn_state);
+ drm_bridge_put(bridge);
if (ret) {
drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
@@ -526,6 +528,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
@@ -574,6 +577,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
+static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
+ if (!drm_enc->possible_clones) {
+ DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
+ continue;
+ }
+
+ if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
+ crtc_state->encoder_mask) {
+ DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
+ crtc->base.id, crtc_state->encoder_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@@ -666,8 +693,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
if (new_crtc_state->enable != has_connectors) {
- drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
+ crtc->base.id, crtc->name,
+ new_crtc_state->enable, has_connectors);
return -EINVAL;
}
@@ -745,6 +773,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
+
+ ret = drm_atomic_check_valid_clones(state, crtc);
+ if (ret != 0)
+ return ret;
}
/*
@@ -1059,6 +1091,15 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* For example enable/disable of a cursor plane which have fixed zpos value
* would trigger all other enabled planes to be forced to the state change.
*
+ * IMPORTANT:
+ *
+ * As this function calls drm_atomic_helper_check_modeset() internally, its
+ * restrictions also apply:
+ * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
+ * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
+ * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
+ * again after that change.
+ *
* RETURNS:
* Zero for success or -errno
*/
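A hedged sketch of that requirement in a driver's top-level check; the names are illustrative, not from this patch.

	static int foo_atomic_check(struct drm_device *dev,
				    struct drm_atomic_state *state)
	{
		int ret;

		ret = drm_atomic_helper_check_modeset(dev, state);
		if (ret)
			return ret;

		/* plane checks below may set crtc_state->mode_changed ... */
		ret = drm_atomic_helper_check_planes(dev, state);
		if (ret)
			return ret;

		/* ... so the modeset check must be run once more */
		return drm_atomic_helper_check_modeset(dev, state);
	}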
@@ -1122,15 +1163,14 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
- struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1142,11 +1182,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state->crtc)
continue;
- old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
if (new_conn_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(
- old_state,
+ state,
new_conn_state->crtc);
else
new_crtc_state = NULL;
@@ -1173,12 +1213,13 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* it away), so we won't call disable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_disable(bridge, old_state);
+ drm_atomic_bridge_chain_disable(bridge, state);
+ drm_bridge_put(bridge);
/* Right function depends upon target state. */
if (funcs) {
if (funcs->atomic_disable)
- funcs->atomic_disable(encoder, old_state);
+ funcs->atomic_disable(encoder, state);
else if (new_conn_state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
@@ -1186,11 +1227,17 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-
- drm_atomic_bridge_chain_post_disable(bridge, old_state);
}
+}
+
+static void
+crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
int ret;
@@ -1211,7 +1258,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_state);
+ funcs->atomic_disable(crtc, state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
@@ -1236,10 +1283,73 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
}
}
+static void
+encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
+
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /*
+ * We shouldn't get this far if we didn't previously have
+ * an encoder, but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
+ drm_bridge_put(bridge);
+ }
+}
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ encoder_bridge_disable(dev, state);
+
+ crtc_disable(dev, state);
+
+ encoder_bridge_post_disable(dev, state);
+}
+
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function updates all the various legacy modeset state pointers in
* connectors, encoders and CRTCs.
@@ -1255,7 +1365,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1264,7 +1374,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
int i;
/* clear out existing links and update dpms */
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
if (connector->encoder) {
WARN_ON(!connector->encoder->crtc);
@@ -1285,7 +1395,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set new links */
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (!new_conn_state->crtc)
continue;
@@ -1297,7 +1407,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set legacy state in the crtc structure */
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct drm_plane *primary = crtc->primary;
struct drm_plane_state *new_plane_state;
@@ -1305,7 +1415,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc->enabled = new_crtc_state->enable;
new_plane_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ drm_atomic_get_new_plane_state(state, primary);
if (new_plane_state && new_plane_state->crtc == crtc) {
crtc->x = new_plane_state->src_x >> 16;
@@ -1337,7 +1447,7 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1345,7 +1455,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!new_crtc_state->mode_changed)
@@ -1361,7 +1471,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
}
}
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
@@ -1376,7 +1486,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
- if (!new_crtc_state->mode_changed)
+ if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
continue;
drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
@@ -1395,13 +1505,14 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
+ drm_bridge_put(bridge);
}
}
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
@@ -1413,25 +1524,25 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* PM since planes updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- disable_outputs(dev, old_state);
+ disable_outputs(dev, state);
- drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
- drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, old_state);
+ crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs;
funcs = connector->helper_private;
@@ -1440,36 +1551,53 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
- funcs->atomic_commit(connector, old_state);
+ funcs->atomic_commit(connector, state);
}
}
}
-/**
- * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @old_state: atomic state object with old state structures
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy CRTC helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+static void
+encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ if (!new_conn_state->best_encoder)
+ continue;
+
+ if (!new_conn_state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
+ continue;
+
+ encoder = new_conn_state->best_encoder;
+
+ drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
+ drm_bridge_put(bridge);
+ }
+}
+
+static void
+crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
@@ -1485,13 +1613,21 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_state);
+ funcs->atomic_enable(crtc, state);
else if (funcs->commit)
funcs->commit(crtc);
}
}
+}
+
+static void
+encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1514,21 +1650,45 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
- funcs->atomic_enable(encoder, old_state);
+ funcs->atomic_enable(encoder, state);
else if (funcs->enable)
funcs->enable(encoder);
else if (funcs->commit)
funcs->commit(encoder);
}
- drm_atomic_bridge_chain_enable(bridge, old_state);
+ drm_atomic_bridge_chain_enable(bridge, state);
+ drm_bridge_put(bridge);
}
+}
- drm_atomic_helper_commit_writebacks(dev, old_state);
+/**
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy CRTC helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ encoder_bridge_pre_enable(dev, state);
+
+ crtc_enable(dev, state);
+
+ encoder_bridge_enable(dev, state);
+
+ drm_atomic_helper_commit_writebacks(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1630,7 +1790,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for vblanks on all affected
* CRTCs (ie. before cleaning up old framebuffers using
@@ -1644,7 +1804,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1655,10 +1815,10 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
- if (old_state->legacy_cursor_update)
+ if (state->legacy_cursor_update)
return;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
@@ -1667,17 +1827,19 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
continue;
crtc_mask |= drm_crtc_mask(crtc);
- old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
+ state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
}
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
+
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
- ret = wait_event_timeout(dev->vblank[i].queue,
- old_state->crtcs[i].last_vblank_count !=
- drm_crtc_vblank_count(crtc),
- msecs_to_jiffies(100));
+ ret = wait_event_timeout(*queue,
+ state->crtcs[i].last_vblank_count !=
+ drm_crtc_vblank_count(crtc),
+ msecs_to_jiffies(100));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
@@ -1690,7 +1852,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
@@ -1703,16 +1865,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* initialized using drm_atomic_helper_setup_commit().
*/
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
int i;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ struct drm_crtc_commit *commit = state->crtcs[i].commit;
int ret;
- crtc = old_state->crtcs[i].ptr;
+ crtc = state->crtcs[i].ptr;
if (!crtc || !commit)
continue;
@@ -1723,14 +1885,14 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
crtc->base.id, crtc->name);
}
- if (old_state->fake_commit)
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit)
+ complete_all(&state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This is the default implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1741,29 +1903,29 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
* Note that the default ordering of how the various stages are called is to
* match the legacy modeset helper library closest.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
/**
* drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
- * @old_state: new modeset state to be committed
+ * @state: new modeset state to be committed
*
* This is an alternative implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1771,30 +1933,30 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
* commit. Otherwise, one should use the default implementation
* drm_atomic_helper_commit_tail().
*/
-void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state,
+ drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
-static void commit_tail(struct drm_atomic_state *old_state)
+static void commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
const struct drm_mode_config_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
@@ -1816,33 +1978,33 @@ static void commit_tail(struct drm_atomic_state *old_state)
*/
start = ktime_get();
- drm_atomic_helper_wait_for_fences(dev, old_state, false);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
- drm_atomic_helper_wait_for_dependencies(old_state);
+ drm_atomic_helper_wait_for_dependencies(state);
/*
* We cannot safely access new_crtc_state after
* drm_atomic_helper_commit_hw_done() so figure out which crtc's have
* self-refresh active beforehand:
*/
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->self_refresh_active)
new_self_refresh_mask |= BIT(i);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(old_state);
+ funcs->atomic_commit_tail(state);
else
- drm_atomic_helper_commit_tail(old_state);
+ drm_atomic_helper_commit_tail(state);
commit_time_ms = ktime_ms_delta(ktime_get(), start);
if (commit_time_ms > 0)
- drm_self_refresh_helper_update_avg_times(old_state,
+ drm_self_refresh_helper_update_avg_times(state,
(unsigned long)commit_time_ms,
new_self_refresh_mask);
- drm_atomic_helper_commit_cleanup_done(old_state);
+ drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_put(old_state);
+ drm_atomic_state_put(state);
}
static void commit_work(struct work_struct *work)
@@ -1928,7 +2090,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- ret = funcs->atomic_async_check(plane, state);
+ ret = funcs->atomic_async_check(plane, state, false);
if (ret != 0)
drm_dbg_atomic(dev,
"[PLANE:%d:%s] driver async check failed\n",
@@ -2385,17 +2547,17 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function waits for all preceding commits that touch the same CRTC as
- * @old_state to both be committed to the hardware (as signalled by
+ * @state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -2406,7 +2568,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
int i;
long ret;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
drm_err(crtc->dev,
@@ -2414,7 +2576,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
crtc->base.id, crtc->name);
}
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(state, conn, old_conn_state, i) {
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
drm_err(conn->dev,
@@ -2422,7 +2584,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
conn->base.id, conn->name);
}
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
drm_err(plane->dev,
@@ -2434,7 +2596,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
@@ -2450,32 +2612,32 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
unsigned long flags;
if (!new_crtc_state->no_vblank)
continue;
- spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ spin_lock_irqsave(&state->dev->event_lock, flags);
if (new_crtc_state->event) {
drm_crtc_send_vblank_event(crtc,
new_crtc_state->event);
new_crtc_state->event = NULL;
}
- spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ spin_unlock_irqrestore(&state->dev->event_lock, flags);
}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -2488,14 +2650,14 @@ EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = new_crtc_state->commit;
if (!commit)
continue;
@@ -2515,32 +2677,32 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
complete_all(&commit->hw_done);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->hw_done);
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->hw_done);
+ complete_all(&state->fake_commit->flip_done);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
- * This signals completion of the atomic update @old_state, including any
+ * This signals completion of the atomic update @state, including any
* cleanup work. If used, it must be called right before calling
* drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
@@ -2553,9 +2715,9 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->cleanup_done);
- WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -2693,7 +2855,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
/**
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
@@ -2701,7 +2863,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
- * It still requires the global state object @old_state to know which planes and
+ * It still requires the global state object @state to know which planes and
* crtcs need to be updated though.
*
* Note that this function does all plane updates across all CRTCs in one step.
@@ -2732,7 +2894,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* This should not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state,
+ struct drm_atomic_state *state,
uint32_t flags)
{
struct drm_crtc *crtc;
@@ -2743,7 +2905,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2754,10 +2916,10 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_begin(crtc, old_state);
+ funcs->atomic_begin(crtc, state);
}
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
bool disabling;
@@ -2795,18 +2957,18 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
no_disable)
continue;
- funcs->atomic_disable(plane, old_state);
+ funcs->atomic_disable(plane, state);
} else if (new_plane_state->crtc || disabling) {
- funcs->atomic_update(plane, old_state);
+ funcs->atomic_update(plane, state);
if (!disabling && funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
- funcs->atomic_enable(plane, old_state);
+ funcs->atomic_enable(plane, state);
}
}
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2817,14 +2979,14 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_flush(crtc, old_state);
+ funcs->atomic_flush(crtc, state);
}
/*
* Signal end of framebuffer access here before hw_done. After hw_done,
* a later commit might have already released the plane state.
*/
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
@@ -2951,10 +3113,10 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function cleans up plane state, specifically framebuffers, from the old
- * configuration. Hence the old configuration must be perserved in @old_state to
+ * configuration. Hence the old configuration must be preserved in @state to
* be able to call this function.
*
* This function may not be called on the new state when the atomic update
@@ -2962,13 +3124,13 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
* drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -3022,6 +3184,8 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_colorop *colorop;
+ struct drm_colorop_state *old_colorop_state, *new_colorop_state;
struct drm_crtc_commit *commit;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state, *new_obj_state;
@@ -3076,7 +3240,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_conn_state->state = state;
new_conn_state->state = NULL;
- state->connectors[i].state = old_conn_state;
+ state->connectors[i].state_to_destroy = old_conn_state;
connector->state = new_conn_state;
}
@@ -3086,7 +3250,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_crtc_state->state = state;
new_crtc_state->state = NULL;
- state->crtcs[i].state = old_crtc_state;
+ state->crtcs[i].state_to_destroy = old_crtc_state;
crtc->state = new_crtc_state;
if (new_crtc_state->commit) {
@@ -3099,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
}
}
+ for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
+ WARN_ON(colorop->state != old_colorop_state);
+
+ old_colorop_state->state = state;
+ new_colorop_state->state = NULL;
+
+ state->colorops[i].state = old_colorop_state;
+ colorop->state = new_colorop_state;
+ }
+
drm_panic_lock(state->dev, flags);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);
@@ -3106,7 +3280,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_plane_state->state = state;
new_plane_state->state = NULL;
- state->planes[i].state = old_plane_state;
+ state->planes[i].state_to_destroy = old_plane_state;
plane->state = new_plane_state;
}
drm_panic_unlock(state->dev, flags);
@@ -3117,7 +3291,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_obj_state->state = state;
new_obj_state->state = NULL;
- state->private_objs[i].state = old_obj_state;
+ state->private_objs[i].state_to_destroy = old_obj_state;
obj->state = new_obj_state;
}
@@ -3363,6 +3537,54 @@ free:
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
+ * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
+ * @crtc: DRM CRTC
+ * @ctx: lock acquisition context
+ *
+ * Reset the active outputs by indicating that connectors have changed.
+ * This implies a reset of all active components in the pipeline between the
+ * CRTC and its connectors.
+ *
+ * A variant of this function exists with
+ * drm_bridge_helper_reset_crtc(), dedicated to bridges.
+ *
+ * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
+ * For drivers which optimize out unnecessary modesets this will result in
+ * a no-op commit, achieving nothing.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
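A hedged usage sketch (not from this patch): triggering a full reset of a CRTC's outputs, e.g. after a link error, using the standard lock-retry macros which re-run the body on -EDEADLK.

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
	ret = drm_atomic_helper_reset_crtc(crtc, &ctx);
	DRM_MODESET_LOCK_ALL_END(crtc->dev, ctx, ret);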
+
+/**
* drm_atomic_helper_shutdown - shutdown all CRTC
* @dev: DRM device
*
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 519228eb1095..cee6d8fc44ad 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
@@ -267,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->color_range = val;
}
+ if (plane->color_pipeline_property) {
+ /* default is always NULL, i.e., bypass */
+ plane_state->color_pipeline = NULL;
+ }
+
if (plane->zpos_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->zpos_property,
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..7320db4b8489 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -27,14 +27,17 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_colorop.h>
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
@@ -256,6 +259,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_colorop_for_plane - set colorop for plane
+ * @plane_state: atomic state object for the plane
+ * @colorop: colorop to use for the plane
+ *
+ * Helper function to select the color pipeline on a plane by setting
+ * it to the first drm_colorop element of the pipeline.
+ */
+void
+drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_plane *plane = plane_state->plane;
+
+ if (colorop)
+ drm_dbg_atomic(plane->dev,
+ "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n",
+ colorop->base.id, plane->base.id, plane->name,
+ plane_state);
+ else
+ drm_dbg_atomic(plane->dev,
+ "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
+
+ plane_state->color_pipeline = colorop;
+}
+EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane);
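For illustration (not part of the patch): selecting a pipeline while building an atomic state in driver code, assuming `state`, `plane` and `colorop` are already at hand.

	struct drm_plane_state *plane_state;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* passing NULL instead would leave the plane's color pipeline in bypass */
	drm_atomic_set_colorop_for_plane(plane_state, colorop);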
+
+/**
* drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
* @crtc: CRTC to use for the connector
@@ -417,6 +448,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (property == crtc->scaling_filter_property) {
state->scaling_filter = val;
+ } else if (property == crtc->sharpness_strength_property) {
+ state->sharpness_strength = val;
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
@@ -454,6 +487,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = 0;
else if (property == crtc->scaling_filter_property)
*val = state->scaling_filter;
+ else if (property == crtc->sharpness_strength_property)
+ *val = state->sharpness_strength;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else {
@@ -538,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == plane->color_pipeline_property) {
+ /* find DRM colorop object */
+ struct drm_colorop *colorop = NULL;
+
+ colorop = drm_colorop_find(dev, file_priv, val);
+
+ if (val && !colorop)
+ return -EACCES;
+
+ drm_atomic_set_colorop_for_plane(state, colorop);
} else if (property == config->prop_fb_damage_clips) {
ret = drm_property_replace_blob_from_id(dev,
&state->fb_damage_clips,
@@ -620,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == plane->color_pipeline_property) {
+ *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0;
} else if (property == config->prop_fb_damage_clips) {
*val = (state->fb_damage_clips) ?
state->fb_damage_clips->base.id : 0;
@@ -642,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+static int drm_atomic_color_set_data_property(struct drm_colorop *colorop,
+ struct drm_colorop_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ ssize_t elem_size = -1;
+ ssize_t size = -1;
+ bool replaced = false;
+
+ switch (colorop->type) {
+ case DRM_COLOROP_1D_LUT:
+ size = colorop->size * sizeof(struct drm_color_lut32);
+ break;
+ case DRM_COLOROP_CTM_3X4:
+ size = sizeof(struct drm_color_ctm_3x4);
+ break;
+ case DRM_COLOROP_3D_LUT:
+ size = colorop->size * colorop->size * colorop->size *
+ sizeof(struct drm_color_lut32);
+ break;
+ default:
+ /* should never get here */
+ return -EINVAL;
+ }
+
+ return drm_property_replace_blob_from_id(colorop->dev,
+ &state->data,
+ val,
+ size,
+ elem_size,
+ &replaced);
+}
+
+static int drm_atomic_colorop_set_property(struct drm_colorop *colorop,
+ struct drm_colorop_state *state,
+ struct drm_file *file_priv,
+ struct drm_property *property,
+ uint64_t val)
+{
+ if (property == colorop->bypass_property) {
+ state->bypass = val;
+ } else if (property == colorop->lut1d_interpolation_property) {
+ colorop->lut1d_interpolation = val;
+ } else if (property == colorop->curve_1d_type_property) {
+ state->curve_1d_type = val;
+ } else if (property == colorop->multiplier_property) {
+ state->multiplier = val;
+ } else if (property == colorop->lut3d_interpolation_property) {
+ colorop->lut3d_interpolation = val;
+ } else if (property == colorop->data_property) {
+ return drm_atomic_color_set_data_property(colorop, state,
+ property, val);
+ } else {
+ drm_dbg_atomic(colorop->dev,
+ "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n",
+ colorop->base.id, colorop->type,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_colorop_get_property(struct drm_colorop *colorop,
+ const struct drm_colorop_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ if (property == colorop->type_property)
+ *val = colorop->type;
+ else if (property == colorop->bypass_property)
+ *val = state->bypass;
+ else if (property == colorop->lut1d_interpolation_property)
+ *val = colorop->lut1d_interpolation;
+ else if (property == colorop->curve_1d_type_property)
+ *val = state->curve_1d_type;
+ else if (property == colorop->multiplier_property)
+ *val = state->multiplier;
+ else if (property == colorop->size_property)
+ *val = colorop->size;
+ else if (property == colorop->lut3d_interpolation_property)
+ *val = colorop->lut3d_interpolation;
+ else if (property == colorop->data_property)
+ *val = (state->data) ? state->data->base.id : 0;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
@@ -908,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
plane->state, property, val);
break;
}
+ case DRM_MODE_OBJECT_COLOROP: {
+ struct drm_colorop *colorop = obj_to_colorop(obj);
+
+ if (colorop->plane)
+ WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex));
+
+ ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val);
+ break;
+ }
default:
drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
@@ -956,6 +1102,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
+
+ if (connector->dpms == mode)
+ goto out;
+
connector->dpms = mode;
crtc = connector->state->crtc;
@@ -1063,6 +1213,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
struct drm_mode_config *config = &plane->dev->mode_config;
+ const struct drm_plane_helper_funcs *plane_funcs = plane->helper_private;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1070,20 +1221,51 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
- if (async_flip &&
- (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
- (prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips))) {
+ if (async_flip) {
+ /* no-op changes are always allowed */
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- break;
+
+ /* fail everything that isn't a no-op or a pure flip */
+ if (ret && prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
+ break;
+ }
+
+ if (ret && plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* ask the driver if this non-primary plane is supported */
+ if (plane_funcs && plane_funcs->atomic_async_check)
+ ret = plane_funcs->atomic_async_check(plane, state, true);
+
+ if (ret) {
+ drm_dbg_atomic(prop->dev,
+ "[PLANE:%d:%s] does not support async flips\n",
+ obj->id, plane->name);
+ break;
+ }
+ }
}
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
+
+ break;
+ }
+ case DRM_MODE_OBJECT_COLOROP: {
+ struct drm_colorop *colorop = obj_to_colorop(obj);
+ struct drm_colorop_state *colorop_state;
+
+ colorop_state = drm_atomic_get_colorop_state(state, colorop);
+ if (IS_ERR(colorop_state)) {
+ ret = PTR_ERR(colorop_state);
+ break;
+ }
+
+ ret = drm_atomic_colorop_set_property(colorop, colorop_state,
+ file_priv, prop, prop_value);
break;
}
default:
@@ -1423,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
+ state->plane_color_pipeline = file_priv->plane_color_pipeline;
retry:
copied_objs = 0;
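As a rough userspace-side sketch (not part of this patch), a client creating the DATA blob for a colorop has to match the size the kernel derives from the colorop type above. The helper name and the lut_size parameter are illustrative only, and the struct drm_color_lut32 uapi type is assumed to be the one introduced by this series:

#include <errno.h>
#include <stdint.h>
#include <xf86drmMode.h>

/* Illustrative only: create a DATA blob sized as the kernel expects for
 * DRM_COLOROP_1D_LUT, i.e. lut_size * sizeof(struct drm_color_lut32). */
static int create_1d_lut_blob(int fd, const struct drm_color_lut32 *lut,
			      uint32_t lut_size, uint32_t *blob_id)
{
	if (!lut || !lut_size)
		return -EINVAL;

	return drmModeCreatePropertyBlob(fd, lut,
					 lut_size * sizeof(*lut), blob_id);
}

The resulting blob id would then be attached to the colorop's DATA property in the same atomic commit that sets the plane's COLOR_PIPELINE.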
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22aa015df387..a2556d16bed6 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,6 +28,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_auth.h>
@@ -95,7 +96,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -103,7 +104,6 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
@@ -118,13 +118,12 @@ int drm_authmagic(struct drm_device *dev, void *data,
drm_dbg_core(dev, "%u\n", auth->magic);
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
@@ -248,41 +247,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
if (drm_is_current_master_locked(file_priv))
- goto out_unlock;
+ return ret;
- if (dev->master) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (dev->master)
+ return -EBUSY;
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!file_priv->master)
+ return -EINVAL;
- if (!file_priv->is_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
+ if (!file_priv->is_master)
+ return drm_new_set_master(dev, file_priv);
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_set_master(dev, file_priv, false);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -299,33 +290,27 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
- if (!drm_is_current_master_locked(file_priv)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!drm_is_current_master_locked(file_priv))
+ return -EINVAL;
- if (!dev->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!dev->master)
+ return -EINVAL;
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_drop_master(dev, file_priv);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -337,7 +322,7 @@ int drm_master_open(struct drm_file *file_priv)
/* if there is no current master make this fd it, but do not create
* any master object for render clients
*/
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
@@ -345,7 +330,6 @@ int drm_master_open(struct drm_file *file_priv)
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
- mutex_unlock(&dev->master_mutex);
return ret;
}
@@ -355,7 +339,7 @@ void drm_master_release(struct drm_file *file_priv)
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
@@ -376,7 +360,6 @@ out:
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- mutex_unlock(&dev->master_mutex);
}
/**
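The drm_auth.c conversion above relies on the scope-based lock guards from <linux/cleanup.h>. A minimal, self-contained sketch of the pattern (the lock and counter here are purely illustrative):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

/* Illustrative only: the mutex is released automatically whenever the
 * function returns, so no unlock label is needed. */
static int example_increment(int limit)
{
	guard(mutex)(&example_lock);

	if (example_count >= limit)
		return -EBUSY;

	example_count++;
	return 0;
}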
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 6e74de833466..6852d73c931c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -75,6 +75,12 @@
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
+ *
+ * When a KMS client is performing front-buffer rendering, it should set
+ * FB_ID to the same front-buffer FB on each atomic commit. This signals
+ * to the driver that it needs to re-read the same FB. Otherwise
+ * drivers which do not employ continuously repeated scanout cycles might
+ * not update the screen.
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
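A hedged userspace sketch of the front-buffer rule documented above: the client re-commits the same FB_ID every frame so the driver knows to re-read the buffer. The plane and property ids are assumed to have been discovered elsewhere:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative only: plane_id, fb_id_prop and fb_id come from
 * drmModeGetPlaneResources()/drmModeObjectGetProperties(). */
static int frontbuffer_flush(int fd, uint32_t plane_id,
			     uint32_t fb_id_prop, uint32_t fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* Re-commit the same front-buffer FB so the driver re-reads it. */
	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret;
}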
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..8f355df883d8 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,7 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -195,18 +197,134 @@
* driver.
*/
+/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static LIST_HEAD(bridge_lingering_list);
+
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ mutex_lock(&bridge_lock);
+ list_del(&bridge->list);
+ mutex_unlock(&bridge_lock);
+
+ if (bridge->funcs->destroy)
+ bridge->funcs->destroy(bridge);
+
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
/**
- * drm_bridge_add - add the given bridge to the global bridge list
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
+
+/**
+ * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
+ *
+ * @data: pointer to &struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper of drm_bridge_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ INIT_LIST_HEAD(&bridge->list);
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
+
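A rough sketch of how a bridge driver might use the new allocator, assuming the devm_drm_bridge_alloc() wrapper macro in drm_bridge.h expands to __devm_drm_bridge_alloc() with (dev, type, member, funcs); my_bridge and my_bridge_funcs are hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct my_bridge {
	struct drm_bridge bridge;
	void __iomem *regs;
};

static const struct drm_bridge_funcs my_bridge_funcs;	/* hypothetical ops */

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mb;

	/* Refcounted allocation; the devm action drops the probe reference. */
	mb = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	drm_bridge_add(&mb->bridge);
	return 0;
}

The matching remove callback would call drm_bridge_remove(&mb->bridge); the memory itself is only released once the last reference is dropped.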
+/**
+ * drm_bridge_add - register a bridge
*
* @bridge: bridge control structure
+ *
+ * Add the given bridge to the global list of bridges, where it can be
+ * found by users via of_drm_find_bridge().
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
+
+ drm_bridge_get(bridge);
+
+ /*
+ * If the bridge was previously added and then removed, it is now
+ * in bridge_lingering_list. Remove it or bridge_lingering_list will be
+ * corrupted when adding this bridge to bridge_list below.
+ */
+ if (!list_empty(&bridge->list))
+ list_del_init(&bridge->list);
+
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
@@ -237,17 +355,24 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
EXPORT_SYMBOL(devm_drm_bridge_add);
/**
- * drm_bridge_remove - remove the given bridge from the global bridge list
+ * drm_bridge_remove - unregister a bridge
*
* @bridge: bridge control structure
+ *
+ * Remove the given bridge from the global list of registered bridges, so
+ * it won't be found by users via of_drm_find_bridge(), and add it to the
+ * lingering bridge list, to keep track of it until its allocated memory is
+ * eventually freed.
*/
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
- list_del_init(&bridge->list);
+ list_move_tail(&bridge->list, &bridge_lingering_list);
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
+
+ drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
@@ -276,6 +401,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -292,6 +422,9 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
* If non-NULL the previous bridge must be already attached by a call to this
* function.
*
+ * The bridge to be attached must have been previously added by
+ * drm_bridge_add().
+ *
* Note that bridges attached to encoders are auto-detached during encoder
* cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
* *not* be balanced with a drm_bridge_detach() in driver code.
@@ -308,11 +441,23 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
if (!encoder || !bridge)
return -EINVAL;
- if (previous && (!previous->dev || previous->encoder != encoder))
- return -EINVAL;
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
- if (bridge->dev)
- return -EBUSY;
+ if (list_empty(&bridge->list))
+ DRM_WARN("Missing drm_bridge_add() before attach\n");
+
+ drm_bridge_get(bridge);
+
+ if (previous && (!previous->dev || previous->encoder != encoder)) {
+ ret = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ if (bridge->dev) {
+ ret = -EBUSY;
+ goto err_put_bridge;
+ }
bridge->dev = encoder->dev;
bridge->encoder = encoder;
@@ -323,12 +468,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -361,6 +506,8 @@ err_reset_bridge:
"failed to attach bridge %pOF to encoder %s\n",
bridge->of_node, encoder->name);
+err_put_bridge:
+ drm_bridge_put(bridge);
return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -373,7 +520,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -381,6 +528,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
list_del(&bridge->chain_node);
bridge->dev = NULL;
+ drm_bridge_put(bridge);
}
/**
@@ -546,7 +694,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -556,7 +704,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter;
@@ -567,15 +715,7 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->funcs->atomic_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
-
- iter->funcs->atomic_disable(iter, old_bridge_state);
+ iter->funcs->atomic_disable(iter, state);
} else if (iter->funcs->disable) {
iter->funcs->disable(iter);
}
@@ -587,29 +727,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
+ if (state && bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, state);
+ else if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -630,7 +760,7 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *next, *limit;
@@ -677,12 +807,12 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
break;
drm_atomic_bridge_call_post_disable(next,
- old_state);
+ state);
}
}
}
- drm_atomic_bridge_call_post_disable(bridge, old_state);
+ drm_atomic_bridge_call_post_disable(bridge, state);
if (limit)
/* Jump all bridges that we have already post_disabled */
@@ -692,28 +822,19 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
- } else if (bridge->funcs->pre_enable) {
+ if (state && bridge->funcs->atomic_pre_enable)
+ bridge->funcs->atomic_pre_enable(bridge, state);
+ else if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -733,7 +854,7 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter, *next, *limit;
@@ -772,11 +893,11 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
*/
break;
- drm_atomic_bridge_call_pre_enable(next, old_state);
+ drm_atomic_bridge_call_pre_enable(next, state);
}
}
- drm_atomic_bridge_call_pre_enable(iter, old_state);
+ drm_atomic_bridge_call_pre_enable(iter, state);
if (iter->pre_enable_prev_first)
/* Jump all bridges that we have already pre_enabled */
@@ -791,7 +912,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
* drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -801,7 +922,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
@@ -811,15 +932,7 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->atomic_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ bridge->funcs->atomic_enable(bridge, state);
} else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
}
@@ -861,11 +974,11 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
{
unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- struct drm_bridge *prev_bridge;
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) =
+ drm_bridge_get_prev_bridge(cur_bridge);
u32 *in_bus_fmts;
int ret;
- prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
cur_bridge);
@@ -982,12 +1095,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
unsigned int i, num_out_bus_fmts = 0;
- struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
- last_bridge = list_last_entry(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_get(list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
last_bridge);
@@ -1041,7 +1154,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct drm_bridge_state *bridge_state, *next_bridge_state;
- struct drm_bridge *next_bridge;
u32 output_flags = 0;
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -1050,7 +1162,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
if (!bridge_state)
return;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
/*
* Let's try to apply the most common case here, that is, propagate
@@ -1147,6 +1259,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
+ * @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
@@ -1157,12 +1270,13 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
* The detection status on success, or connector_status_unknown if the bridge
* doesn't support output detection.
*/
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
return connector_status_unknown;
- return bridge->funcs->detect(bridge);
+ return bridge->funcs->detect(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);
@@ -1331,6 +1445,100 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+/**
+ * devm_drm_put_bridge - Release a bridge reference obtained via devm
+ * @dev: device that got the bridge via devm
+ * @bridge: pointer to a struct drm_bridge obtained via devm
+ *
+ * Same as drm_bridge_put() for bridge pointers obtained via devm functions
+ * such as devm_drm_bridge_alloc().
+ *
+ * This function is a temporary workaround and MUST NOT be used. Manual
+ * handling of bridge lifetime is inherently unsafe.
+ */
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
+{
+ devm_release_action(dev, drm_bridge_put_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_put_bridge);
+
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx,
+ bool lingering)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+
+ drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
+ lingering ? " [lingering]" : "");
+
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ /* The OF node could be freed after drm_bridge_remove() */
+ if (bridge->of_node && !lingering)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
+
+ list_for_each_entry(bridge, &bridge_lingering_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
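The reference counting introduced above is also used with scope-based cleanup (__free(drm_bridge_put)) in the bus-format helpers. A small hedged sketch of that pattern, assuming the series provides the corresponding DEFINE_FREE() for drm_bridge_put():

#include <linux/cleanup.h>
#include <drm/drm_bridge.h>

/* Illustrative only: check whether a bridge has a successor in its chain,
 * dropping the temporary reference automatically on return. */
static bool my_bridge_has_next(struct drm_bridge *bridge)
{
	struct drm_bridge *next __free(drm_bridge_put) =
		drm_bridge_get_next_bridge(bridge);

	return next != NULL;
}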
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..420f29cf3e54
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/export.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset a @bridge pipeline. It will power-cycle all active components
+ * between the CRTC and connector that bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is -EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = connector->state->crtc;
+ ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+ if (ret)
+ goto out;
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
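A hedged sketch of how a bridge driver could call the new helper from, say, an HPD or link-retrain handler, letting the standard lock macros handle the -EDEADLK retry; the function name is hypothetical:

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

/* Illustrative only: power-cycle the pipeline feeding this bridge, retrying
 * the whole sequence if the helper reports a modeset-lock deadlock. */
static void my_bridge_retrain(struct drm_bridge *bridge)
{
	struct drm_device *dev = bridge->dev;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}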
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 103c185bb1c8..2f279b46bd2c 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -3,14 +3,27 @@
* Copyright © 2021 Intel Corporation
*/
+#include <kunit/test-bug.h>
+
+#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
+
+enum drm_buddy_free_tree {
+ DRM_BUDDY_CLEAR_TREE = 0,
+ DRM_BUDDY_DIRTY_TREE,
+ DRM_BUDDY_MAX_FREE_TREES,
+};
static struct kmem_cache *slab_blocks;
+#define for_each_free_tree(tree) \
+ for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)
+
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
struct drm_buddy_block *parent,
unsigned int order,
@@ -28,6 +41,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
block->header |= order;
block->parent = parent;
+ RB_CLEAR_NODE(&block->rb);
+
BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
return block;
}
@@ -38,23 +53,64 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
-static void list_insert_sorted(struct drm_buddy *mm,
- struct drm_buddy_block *block)
+static enum drm_buddy_free_tree
+get_block_tree(struct drm_buddy_block *block)
{
- struct drm_buddy_block *node;
- struct list_head *head;
+ return drm_buddy_block_is_clear(block) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+}
- head = &mm->free_list[drm_buddy_block_order(block)];
- if (list_empty(head)) {
- list_add(&block->link, head);
- return;
- }
+static struct drm_buddy_block *
+rbtree_get_free_block(const struct rb_node *node)
+{
+ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
+}
- list_for_each_entry(node, head, link)
- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
- break;
+static struct drm_buddy_block *
+rbtree_last_free_block(struct rb_root *root)
+{
+ return rbtree_get_free_block(rb_last(root));
+}
+
+static bool rbtree_is_empty(struct rb_root *root)
+{
+ return RB_EMPTY_ROOT(root);
+}
+
+static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
+{
+ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
+}
+
+static bool rbtree_block_offset_less(struct rb_node *block,
+ const struct rb_node *node)
+{
+ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
+ rbtree_get_free_block(node));
+}
+
+static void rbtree_insert(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ enum drm_buddy_free_tree tree)
+{
+ rb_add(&block->rb,
+ &mm->free_trees[tree][drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+}
+
+static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ unsigned int order = drm_buddy_block_order(block);
+ enum drm_buddy_free_tree tree;
+ struct rb_root *root;
+
+ tree = get_block_tree(block);
+ root = &mm->free_trees[tree][order];
- __list_add(&block->link, node->link.prev, &node->link);
+ rb_erase(&block->rb, root);
+ RB_CLEAR_NODE(&block->rb);
}
static void clear_reset(struct drm_buddy_block *block)
@@ -67,29 +123,34 @@ static void mark_cleared(struct drm_buddy_block *block)
block->header |= DRM_BUDDY_HEADER_CLEAR;
}
-static void mark_allocated(struct drm_buddy_block *block)
+static void mark_allocated(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_ALLOCATED;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static void mark_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
+ enum drm_buddy_free_tree tree;
+
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_insert_sorted(mm, block);
+ tree = get_block_tree(block);
+ rbtree_insert(mm, block, tree);
}
-static void mark_split(struct drm_buddy_block *block)
+static void mark_split(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_SPLIT;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
@@ -145,7 +206,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm,
mark_cleared(parent);
}
- list_del(&buddy->link);
+ rbtree_remove(mm, buddy);
if (force_merge && drm_buddy_block_is_clear(buddy))
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
@@ -166,7 +227,7 @@ static int __force_merge(struct drm_buddy *mm,
u64 end,
unsigned int min_order)
{
- unsigned int order;
+ unsigned int tree, order;
int i;
if (!min_order)
@@ -175,44 +236,48 @@ static int __force_merge(struct drm_buddy *mm,
if (min_order > mm->max_order)
return -EINVAL;
- for (i = min_order - 1; i >= 0; i--) {
- struct drm_buddy_block *block, *prev;
+ for_each_free_tree(tree) {
+ for (i = min_order - 1; i >= 0; i--) {
+ struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
- struct drm_buddy_block *buddy;
- u64 block_start, block_end;
+ while (iter) {
+ struct drm_buddy_block *block, *buddy;
+ u64 block_start, block_end;
- if (!block->parent)
- continue;
+ block = rbtree_get_free_block(iter);
+ iter = rb_prev(iter);
- block_start = drm_buddy_block_offset(block);
- block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+ if (!block || !block->parent)
+ continue;
- if (!contains(start, end, block_start, block_end))
- continue;
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
- buddy = __get_buddy(block);
- if (!drm_buddy_block_is_free(buddy))
- continue;
+ if (!contains(start, end, block_start, block_end))
+ continue;
- WARN_ON(drm_buddy_block_is_clear(block) ==
- drm_buddy_block_is_clear(buddy));
+ buddy = __get_buddy(block);
+ if (!drm_buddy_block_is_free(buddy))
+ continue;
- /*
- * If the prev block is same as buddy, don't access the
- * block in the next iteration as we would free the
- * buddy block as part of the free function.
- */
- if (prev == buddy)
- prev = list_prev_entry(prev, link);
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
- list_del(&block->link);
- if (drm_buddy_block_is_clear(block))
- mm->clear_avail -= drm_buddy_block_size(mm, block);
+ /*
+ * Advance to the next node when the current node is the buddy,
+ * as freeing the block will also remove its buddy from the tree.
+ */
+ if (iter == &buddy->rb)
+ iter = rb_prev(iter);
- order = __drm_buddy_free(mm, block, true);
- if (order >= min_order)
- return 0;
+ rbtree_remove(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+ order = __drm_buddy_free(mm, block, true);
+ if (order >= min_order)
+ return 0;
+ }
}
}
@@ -233,8 +298,8 @@ static int __force_merge(struct drm_buddy *mm,
*/
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
- unsigned int i;
- u64 offset;
+ unsigned int i, j, root_count = 0;
+ u64 offset = 0;
if (size < chunk_size)
return -EINVAL;
@@ -255,14 +320,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!mm->free_list)
+ mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
+ sizeof(*mm->free_trees),
+ GFP_KERNEL);
+ if (!mm->free_trees)
return -ENOMEM;
- for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
+ for_each_free_tree(i) {
+ mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
+ sizeof(struct rb_root),
+ GFP_KERNEL);
+ if (!mm->free_trees[i])
+ goto out_free_tree;
+
+ for (j = 0; j <= mm->max_order; ++j)
+ mm->free_trees[i][j] = RB_ROOT;
+ }
mm->n_roots = hweight64(size);
@@ -270,10 +343,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
- goto out_free_list;
-
- offset = 0;
- i = 0;
+ goto out_free_tree;
/*
* Split into power-of-two blocks, in case we are given a size that is
@@ -293,24 +363,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mark_free(mm, root);
- BUG_ON(i > mm->max_order);
+ BUG_ON(root_count > mm->max_order);
BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
- mm->roots[i] = root;
+ mm->roots[root_count] = root;
offset += root_size;
size -= root_size;
- i++;
+ root_count++;
} while (size);
return 0;
out_free_roots:
- while (i--)
- drm_block_free(mm, mm->roots[i]);
+ while (root_count--)
+ drm_block_free(mm, mm->roots[root_count]);
kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
+out_free_tree:
+ while (i--)
+ kfree(mm->free_trees[i]);
+ kfree(mm->free_trees);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
@@ -320,11 +392,11 @@ EXPORT_SYMBOL(drm_buddy_init);
*
* @mm: DRM buddy manager to free
*
- * Cleanup memory manager resources and the freelist
+ * Cleanup memory manager resources and the freetree
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
- u64 root_size, size;
+ u64 root_size, size, start;
unsigned int order;
int i;
@@ -332,9 +404,12 @@ void drm_buddy_fini(struct drm_buddy *mm)
for (i = 0; i < mm->n_roots; ++i) {
order = ilog2(size) - ilog2(mm->chunk_size);
- __force_merge(mm, 0, size, order);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
+ kunit_fail_current_test("buddy_fini() root");
- WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
root_size = mm->chunk_size << order;
@@ -343,8 +418,9 @@ void drm_buddy_fini(struct drm_buddy *mm)
WARN_ON(mm->avail != mm->size);
+ for_each_free_tree(i)
+ kfree(mm->free_trees[i]);
kfree(mm->roots);
- kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
@@ -368,8 +444,7 @@ static int split_block(struct drm_buddy *mm,
return -ENOMEM;
}
- mark_free(mm, block->left);
- mark_free(mm, block->right);
+ mark_split(mm, block);
if (drm_buddy_block_is_clear(block)) {
mark_cleared(block->left);
@@ -377,7 +452,8 @@ static int split_block(struct drm_buddy *mm,
clear_reset(block);
}
- mark_split(block);
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
return 0;
}
@@ -400,6 +476,55 @@ drm_get_buddy(struct drm_buddy_block *block)
EXPORT_SYMBOL(drm_get_buddy);
/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freetree.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+ enum drm_buddy_free_tree src_tree, dst_tree;
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+
+ size = mm->size;
+ for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
+ }
+
+ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+ dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
+ for (i = 0; i <= mm->max_order; ++i) {
+ struct rb_root *root = &mm->free_trees[src_tree][i];
+ struct drm_buddy_block *block, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ rbtree_remove(mm, block);
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
+
+ rbtree_insert(mm, block, dst_tree);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
+/**
* drm_buddy_free_block - free a block
*
* @mm: DRM buddy manager
@@ -583,23 +708,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm,
}
static struct drm_buddy_block *
-get_maxblock(struct drm_buddy *mm, unsigned int order,
- unsigned long flags)
+get_maxblock(struct drm_buddy *mm,
+ unsigned int order,
+ enum drm_buddy_free_tree tree)
{
struct drm_buddy_block *max_block = NULL, *block = NULL;
+ struct rb_root *root;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ root = &mm->free_trees[tree][i];
+ block = rbtree_last_free_block(root);
if (!block)
continue;
@@ -618,46 +737,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
}
static struct drm_buddy_block *
-alloc_from_freelist(struct drm_buddy *mm,
+alloc_from_freetree(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
+ struct rb_root *root;
+ enum drm_buddy_free_tree tree;
unsigned int tmp;
int err;
+ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(mm, order, flags);
+ block = get_maxblock(mm, order, tree);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ /* Get RB tree root for this order and tree */
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
if (block)
break;
}
}
if (!block) {
- /* Fallback method */
+ /* Try allocating from the other tree */
+ tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
+ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- if (!list_empty(&mm->free_list[tmp])) {
- block = list_last_entry(&mm->free_list[tmp],
- struct drm_buddy_block,
- link);
- if (block)
- break;
- }
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
+ if (block)
+ break;
}
if (!block)
@@ -722,7 +839,7 @@ static int __alloc_range(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end)) {
if (drm_buddy_block_is_free(block)) {
- mark_allocated(block);
+ mark_allocated(mm, block);
total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
@@ -800,10 +917,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
{
u64 rhs_offset, lhs_offset, lhs_size, filled;
struct drm_buddy_block *block;
- struct list_head *list;
+ unsigned int tree, order;
LIST_HEAD(blocks_lhs);
unsigned long pages;
- unsigned int order;
u64 modify_size;
int err;
@@ -813,35 +929,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
if (order == 0)
return -ENOSPC;
- list = &mm->free_list[order];
- if (list_empty(list))
- return -ENOSPC;
+ for_each_free_tree(tree) {
+ struct rb_root *root;
+ struct rb_node *iter;
- list_for_each_entry_reverse(block, list, link) {
- /* Allocate blocks traversing RHS */
- rhs_offset = drm_buddy_block_offset(block);
- err = __drm_buddy_alloc_range(mm, rhs_offset, size,
- &filled, blocks);
- if (!err || err != -ENOSPC)
- return err;
-
- lhs_size = max((size - filled), min_block_size);
- if (!IS_ALIGNED(lhs_size, min_block_size))
- lhs_size = round_up(lhs_size, min_block_size);
-
- /* Allocate blocks traversing LHS */
- lhs_offset = drm_buddy_block_offset(block) - lhs_size;
- err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
- NULL, &blocks_lhs);
- if (!err) {
- list_splice(&blocks_lhs, blocks);
- return 0;
- } else if (err != -ENOSPC) {
+ root = &mm->free_trees[tree][order];
+ if (rbtree_is_empty(root))
+ continue;
+
+ iter = rb_last(root);
+ while (iter) {
+ block = rbtree_get_free_block(iter);
+
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+ &filled, blocks);
+ if (!err || err != -ENOSPC)
+ return err;
+
+ lhs_size = max((size - filled), min_block_size);
+ if (!IS_ALIGNED(lhs_size, min_block_size))
+ lhs_size = round_up(lhs_size, min_block_size);
+
+ /* Allocate blocks traversing LHS */
+ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+ NULL, &blocks_lhs);
+ if (!err) {
+ list_splice(&blocks_lhs, blocks);
+ return 0;
+ } else if (err != -ENOSPC) {
+ drm_buddy_free_list_internal(mm, blocks);
+ return err;
+ }
+ /* Free blocks for the next iteration */
drm_buddy_free_list_internal(mm, blocks);
- return err;
+
+ iter = rb_prev(iter);
}
- /* Free blocks for the next iteration */
- drm_buddy_free_list_internal(mm, blocks);
}
return -ENOSPC;
@@ -927,7 +1053,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -950,8 +1076,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags);
else
- /* Allocate from freelist */
- return alloc_from_freelist(mm, order, flags);
+ /* Allocate from freetree */
+ return alloc_from_freetree(mm, order, flags);
}
/**
@@ -968,8 +1094,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
* alloc_range_bias() called on range limitations, which traverses
* the tree and returns the desired block.
*
- * alloc_from_freelist() called when *no* range restrictions
- * are enforced, which picks the block from the freelist.
+ * alloc_from_freetree() called when *no* range restrictions
+ * are enforced, which picks the block from the freetree.
*
* Returns:
* 0 on success, error code on failure.
@@ -1071,7 +1197,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
}
} while (1);
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -1152,12 +1278,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
- struct drm_buddy_block *block;
+ struct drm_buddy_block *block, *tmp;
+ struct rb_root *root;
u64 count = 0, free;
+ unsigned int tree;
+
+ for_each_free_tree(tree) {
+ root = &mm->free_trees[tree][order];
- list_for_each_entry(block, &mm->free_list[order], link) {
- BUG_ON(!drm_buddy_block_is_free(block));
- count++;
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
}
drm_printf(p, "order-%2d ", order);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 7051c9c909c2..ea1d2d5d2c66 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -93,8 +93,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#elif defined(__powerpc__)
unsigned long i;
@@ -139,8 +138,7 @@ drm_clflush_sg(struct sg_table *st)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
@@ -172,8 +170,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
return;
}
- if (wbinvd_on_all_cpus())
- pr_err("Timed out waiting for cache flush\n");
+ wbinvd_on_all_cpus();
#else
WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..a82d741e6630 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -10,12 +11,14 @@
#include <linux/slab.h>
#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
@@ -167,29 +170,59 @@ void drm_client_release(struct drm_client_dev *client)
drm_client_modeset_free(client);
drm_client_close(client);
+
+ if (client->funcs && client->funcs->free)
+ client->funcs->free(client);
+
drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_client_release);
-static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+/**
+ * drm_client_buffer_delete - Delete a client buffer
+ * @buffer: DRM client buffer
+ */
+void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
- drm_gem_object_put(buffer->gem);
- }
+ struct drm_gem_object *gem;
+ int ret;
+
+ if (!buffer)
+ return;
+
+ gem = buffer->fb->obj[0];
+ drm_gem_vunmap(gem, &buffer->map);
+
+ ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+ if (ret)
+ drm_err(buffer->client->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+ drm_gem_object_put(buffer->gem);
kfree(buffer);
}
+EXPORT_SYMBOL(drm_client_buffer_delete);
static struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
- u32 format, u32 *handle)
+ u32 format, u32 handle, u32 pitch)
{
- const struct drm_format_info *info = drm_format_info(format);
- struct drm_mode_create_dumb dumb_args = { };
+ struct drm_mode_fb_cmd2 fb_req = {
+ .width = width,
+ .height = height,
+ .pixel_format = format,
+ .handles = {
+ handle,
+ },
+ .pitches = {
+ pitch,
+ },
+ };
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -198,28 +231,38 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
buffer->client = client;
- dumb_args.width = width;
- dumb_args.height = height;
- dumb_args.bpp = drm_format_info_bpp(info, 0);
- ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
- if (ret)
- goto err_delete;
-
- obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+ obj = drm_gem_object_lookup(client->file, handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
- buffer->pitch = dumb_args.pitch;
+ ret = drm_mode_addfb2(dev, &fb_req, client->file);
+ if (ret)
+ goto err_drm_gem_object_put;
+
+ fb = drm_framebuffer_lookup(dev, client->file, fb_req.fb_id);
+ if (drm_WARN_ON(dev, !fb)) {
+ ret = -ENOENT;
+ goto err_drm_mode_rmfb;
+ }
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_put(fb);
+
+ strscpy(fb->comm, client->name, TASK_COMM_LEN);
+
buffer->gem = obj;
- *handle = dumb_args.handle;
+ buffer->fb = fb;
return buffer;
+err_drm_mode_rmfb:
+ drm_mode_rmfb(dev, fb_req.fb_id, client->file);
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
err_delete:
- drm_client_buffer_delete(buffer);
-
+ kfree(buffer);
return ERR_PTR(ret);
}
@@ -246,13 +289,13 @@ err_delete:
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
struct iosys_map *map = &buffer->map;
int ret;
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -275,10 +318,10 @@ EXPORT_SYMBOL(drm_client_buffer_vmap_local);
*/
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -303,34 +346,18 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int
-drm_client_buffer_vmap(struct drm_client_buffer *buffer,
- struct iosys_map *map_copy)
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
int ret;
- drm_gem_lock(gem);
-
- ret = drm_gem_pin_locked(gem);
+ ret = drm_gem_vmap(gem, &buffer->map);
if (ret)
- goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
- if (ret)
- goto err_drm_gem_vmap;
-
- drm_gem_unlock(gem);
-
- *map_copy = *map;
+ return ret;
+ *map_copy = buffer->map;
return 0;
-
-err_drm_gem_vmap:
- drm_gem_unpin_locked(buffer->gem);
-err_drm_gem_pin_locked:
- drm_gem_unlock(gem);
- return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -344,63 +371,14 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
- drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
- drm_gem_unpin_locked(gem);
- drm_gem_unlock(gem);
+ drm_gem_vunmap(gem, &buffer->map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
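A small hedged sketch of how a DRM client (for example the fbdev emulation) might use the vmap/vunmap pair above to clear its buffer; the call site is hypothetical and the byte count is derived from the backing framebuffer:

#include <linux/iosys-map.h>
#include <drm/drm_client.h>
#include <drm/drm_framebuffer.h>

/* Illustrative only: map the client buffer, zero its first plane, unmap. */
static int my_client_clear(struct drm_client_buffer *buffer)
{
	struct iosys_map map;
	size_t len;
	int ret;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		return ret;

	len = (size_t)buffer->fb->pitches[0] * buffer->fb->height;
	iosys_map_memset(&map, 0, 0, len);

	drm_client_buffer_vunmap(buffer);
	return 0;
}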
-static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
-{
- int ret;
-
- if (!buffer->fb)
- return;
-
- ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
- if (ret)
- drm_err(buffer->client->dev,
- "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
-
- buffer->fb = NULL;
-}
-
-static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format,
- u32 handle)
-{
- struct drm_client_dev *client = buffer->client;
- struct drm_mode_fb_cmd2 fb_req = { };
- int ret;
-
- fb_req.width = width;
- fb_req.height = height;
- fb_req.pixel_format = format;
- fb_req.handles[0] = handle;
- fb_req.pitches[0] = buffer->pitch;
-
- ret = drm_mode_addfb2(client->dev, &fb_req, client->file);
- if (ret)
- return ret;
-
- buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
- if (WARN_ON(!buffer->fb))
- return -ENOENT;
-
- /* drop the reference we picked up in framebuffer lookup */
- drm_framebuffer_put(buffer->fb);
-
- strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
-
- return 0;
-}
-
/**
- * drm_client_framebuffer_create - Create a client framebuffer
+ * drm_client_buffer_create_dumb - Create a client buffer backed by a dumb buffer
* @client: DRM client
* @width: Framebuffer width
* @height: Framebuffer height
@@ -408,24 +386,33 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
*
* This function creates a &drm_client_buffer which consists of a
* &drm_framebuffer backed by a dumb buffer.
- * Call drm_client_framebuffer_delete() to free the buffer.
+ * Call drm_client_buffer_delete() to free the buffer.
*
* Returns:
* Pointer to a client buffer or an error pointer on failure.
*/
struct drm_client_buffer *
-drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
+ const struct drm_format_info *info = drm_format_info(format);
+ struct drm_device *dev = client->dev;
+ struct drm_mode_create_dumb dumb_args = { };
struct drm_client_buffer *buffer;
- u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format,
- &handle);
- if (IS_ERR(buffer))
- return buffer;
+ dumb_args.width = width;
+ dumb_args.height = height;
+ dumb_args.bpp = drm_format_info_bpp(info, 0);
+ ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+ if (ret)
+ return ERR_PTR(ret);
- ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ dumb_args.handle, dumb_args.pitch);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
+ goto err_drm_mode_destroy_dumb;
+ }
/*
* The handle is only needed for creating the framebuffer, destroy it
@@ -433,34 +420,19 @@ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 heig
* object as DMA-buf. The framebuffer and our buffer structure are still
* holding references to the GEM object to prevent its destruction.
*/
- drm_mode_destroy_dumb(client->dev, handle, client->file);
-
- if (ret) {
- drm_client_buffer_delete(buffer);
- return ERR_PTR(ret);
- }
+ drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file);
return buffer;
-}
-EXPORT_SYMBOL(drm_client_framebuffer_create);
-
-/**
- * drm_client_framebuffer_delete - Delete a client framebuffer
- * @buffer: DRM client buffer (can be NULL)
- */
-void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
-{
- if (!buffer)
- return;
- drm_client_buffer_rmfb(buffer);
- drm_client_buffer_delete(buffer);
+err_drm_mode_destroy_dumb:
+ drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL(drm_client_framebuffer_delete);
+EXPORT_SYMBOL(drm_client_buffer_create_dumb);
/**
- * drm_client_framebuffer_flush - Manually flush client framebuffer
- * @buffer: DRM client buffer (can be NULL)
+ * drm_client_buffer_flush - Manually flush client buffer
+ * @buffer: DRM client buffer
* @rect: Damage rectangle (if NULL flushes all)
*
* This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes
@@ -469,7 +441,7 @@ EXPORT_SYMBOL(drm_client_framebuffer_delete);
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
+int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
{
if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty)
return 0;
@@ -489,4 +461,4 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re
return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
0, 0, NULL, 0);
}
-EXPORT_SYMBOL(drm_client_framebuffer_flush);
+EXPORT_SYMBOL(drm_client_buffer_flush);
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index e303de564485..7b3e362f7926 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
@@ -38,17 +39,41 @@ void drm_client_dev_unregister(struct drm_device *dev)
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
list_del(&client->list);
- if (client->funcs && client->funcs->unregister) {
+ /*
+ * Unregistering consumes and frees the client.
+ */
+ if (client->funcs && client->funcs->unregister)
client->funcs->unregister(client);
- } else {
+ else
drm_client_release(client);
- kfree(client);
- }
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_unregister);
+static void drm_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (!client->funcs || !client->funcs->hotplug)
+ return;
+
+ if (client->hotplug_failed)
+ return;
+
+ if (client->suspended) {
+ client->hotplug_pending = true;
+ return;
+ }
+
+ client->hotplug_pending = false;
+ ret = client->funcs->hotplug(client);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
+}
+
/**
* drm_client_dev_hotplug - Send hotplug event to clients
* @dev: DRM device
@@ -61,7 +86,6 @@ EXPORT_SYMBOL(drm_client_dev_unregister);
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
- int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -72,23 +96,13 @@ void drm_client_dev_hotplug(struct drm_device *dev)
}
mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list) {
- if (!client->funcs || !client->funcs->hotplug)
- continue;
-
- if (client->hotplug_failed)
- continue;
-
- ret = client->funcs->hotplug(client);
- drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
- if (ret)
- client->hotplug_failed = true;
- }
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_client_hotplug(client);
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
-void drm_client_dev_restore(struct drm_device *dev)
+void drm_client_dev_restore(struct drm_device *dev, bool force)
{
struct drm_client_dev *client;
int ret;
@@ -101,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev)
if (!client->funcs || !client->funcs->restore)
continue;
- ret = client->funcs->restore(client);
+ ret = client->funcs->restore(client, force);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
@@ -109,7 +123,7 @@ void drm_client_dev_restore(struct drm_device *dev)
mutex_unlock(&dev->clientlist_mutex);
}
-static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_suspend(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -118,7 +132,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return 0;
if (client->funcs && client->funcs->suspend)
- ret = client->funcs->suspend(client, holds_console_lock);
+ ret = client->funcs->suspend(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = true;
@@ -126,20 +140,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return ret;
}
-void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_suspend(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->suspended)
- drm_client_suspend(client, holds_console_lock);
+ drm_client_suspend(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_suspend);
-static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_resume(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -148,22 +162,25 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
return 0;
if (client->funcs && client->funcs->resume)
- ret = client->funcs->resume(client, holds_console_lock);
+ ret = client->funcs->resume(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = false;
+ if (client->hotplug_pending)
+ drm_client_hotplug(client);
+
return ret;
}
-void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_resume(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (client->suspended)
- drm_client_resume(client, holds_console_lock);
+ drm_client_resume(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 251f94313717..fc4caf7da5fc 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -8,6 +8,8 @@
*/
#include "drm/drm_modeset_lock.h"
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -39,7 +41,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +75,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +120,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +133,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +147,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +162,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +242,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +268,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
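+/*
+ * Note: the modes[] arrays used by the helpers below own duplicates of the
+ * connector modes. mode_replace() frees the previous duplicate and stores a
+ * fresh copy of @src (or NULL); modes_destroy() releases what remains.
+ */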
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
+
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +315,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +351,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +362,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +379,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +414,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +434,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +469,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +500,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +542,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +582,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +617,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +661,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +704,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +712,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
-
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -743,6 +766,15 @@ retry:
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
+ for (i = 0; i < count; i++) {
+ struct drm_connector *connector = connectors[i];
+
+ if (connector->has_tile)
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
+ connector->tile_h_loc, connector->tile_v_loc);
+ }
+
/*
* If the BIOS didn't enable everything it could, fall back to have the
* same user experiencing of lighting up as much as possible like the
@@ -790,8 +822,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -842,7 +873,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -859,10 +890,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -893,11 +926,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -929,7 +962,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1210,11 +1243,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
@@ -1259,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
}
EXPORT_SYMBOL(drm_client_modeset_dpms);
+/**
+ * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur
+ * @client: DRM client
+ * @crtc_index: The index of the CRTC to wait on
+ *
+ * Block the caller until the given CRTC has seen a VBLANK. Do nothing
+ * if the CRTC is disabled. If there's another DRM master present, fail
+ * with -EBUSY.
+ *
+ * Returns:
+ * 0 on success, or negative error code otherwise.
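+ *
+ * A DRM client would typically call this from its drawing or flush path to
+ * rate-limit screen updates to the display's refresh rate.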
+ */
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /*
+ * Rate-limit update frequency to vblank. If there's a DRM master
+ * present, it could interfere while we're waiting for the vblank
+ * event. Don't wait in this case.
+ */
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ crtc = client->modesets[crtc_index].crtc;
+
+ /*
+ * Only wait for a vblank event if the CRTC is enabled, otherwise
+ * just don't do anything, not even report an error.
+ */
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+
+ drm_master_internal_release(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank);
+
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
diff --git a/drivers/gpu/drm/drm_client_sysrq.c b/drivers/gpu/drm/drm_client_sysrq.c
new file mode 100644
index 000000000000..eea660096f1b
--- /dev/null
+++ b/drivers/gpu/drm/drm_client_sysrq.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+#include <linux/sysrq.h>
+
+#include <drm/drm_client_event.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "drm_internal.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static LIST_HEAD(drm_client_sysrq_dev_list);
+static DEFINE_MUTEX(drm_client_sysrq_dev_lock);
+
+/* emergency restore, don't bother with error reporting */
+static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored)
+{
+ struct drm_device *dev;
+
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
+ list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) {
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ drm_client_dev_restore(dev, true);
+ }
+}
+
+static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn);
+
+static void drm_client_sysrq_restore_handler(u8 ignored)
+{
+ schedule_work(&drm_client_sysrq_restore_work);
+}
+
+static const struct sysrq_key_op drm_client_sysrq_restore_op = {
+ .handler = drm_client_sysrq_restore_handler,
+ .help_msg = "force-fb(v)",
+ .action_msg = "Restore framebuffer console",
+};
+
+void drm_client_sysrq_register(struct drm_device *dev)
+{
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
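+ /* the first registered device installs the global sysrq key handler */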
+ if (list_empty(&drm_client_sysrq_dev_list))
+ register_sysrq_key('v', &drm_client_sysrq_restore_op);
+
+ list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list);
+}
+
+void drm_client_sysrq_unregister(struct drm_device *dev)
+{
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
+ /* remove device from global restore list */
+ if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list)))
+ list_del(&dev->client_sysrq_list);
+
+ /* no devices left; unregister key */
+ if (list_empty(&drm_client_sysrq_dev_list))
+ unregister_sysrq_key('v', &drm_client_sysrq_restore_op);
+}
+#endif
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3969dc548cff..c598b99673fc 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
@@ -28,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+#include <kunit/visibility.h>
#include "drm_crtc_internal.h"
@@ -494,6 +496,7 @@ const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
return color_encoding_name[encoding];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_encoding_name);
/**
* drm_get_color_range_name - return a string for color range
@@ -509,6 +512,7 @@ const char *drm_get_color_range_name(enum drm_color_range range)
return color_range_name[range];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_range_name);
/**
* drm_plane_create_color_properties - color encoding related plane properties
@@ -630,3 +634,286 @@ int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
+
+/*
+ * Gamma-LUT programming
+ */
+
+/**
+ * drm_crtc_load_gamma_888 - Programs gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component.
+ */
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_gamma(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_888);
+
+/**
+ * drm_crtc_load_gamma_565_from_888 - Programs gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper subsamples
+ * each component down to the 32/64/32 entries of an RGB565 gamma LUT.
+ */
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
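+ /*
+ * Subsample the 256-entry input ramp: for the 5-bit red/blue channels,
+ * entry i maps to i * 8 + i / 4 (roughly i * 255 / 31); the 6-bit green
+ * channel below uses i * 4 + i / 16 (roughly i * 255 / 63).
+ */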
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 4 + i / 16].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i) {
+ g = lut[i * 4 + i / 16].green;
+ set_gamma(crtc, i, 0, g, 0);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_565_from_888);
+
+/**
+ * drm_crtc_load_gamma_555_from_888 - Programs gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper subsamples
+ * each component down to the 32 entries per channel of an RGB555 gamma LUT.
+ */
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 8 + i / 4].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_555_from_888);
+
+static void fill_gamma_888(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
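+ /* replicate each 8-bit component into both bytes of the 16-bit value */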
+ r = (r << 8) | r;
+ g = (g << 8) | g;
+ b = (b << 8) | b;
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_888 - Programs a default gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_gamma_888(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_888);
+
+static void fill_gamma_565(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
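+ /* replicate the 5-bit (6-bit for green) value across all 16 bits */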
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 10) | (g << 4) | (g >> 2);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_565 - Programs a default gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_565(crtc, i, i, i, i, set_gamma);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i)
+ fill_gamma_565(crtc, i, 0, i, 0, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_565);
+
+static void fill_gamma_555(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 11) | (g << 6) | (g << 1) | (g >> 4);
+ b = (b << 11) | (b << 6) | (b << 1) | (r >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_555 - Programs a default gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_555(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_555);
+
+/*
+ * Color-LUT programming
+ */
+
+/**
+ * drm_crtc_load_palette_8 - Programs palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The palette to program
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs the palette specified in @lut to hardware. The input palette
+ * must have 256 entries per color component.
+ */
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_palette(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_palette_8);
+
+static void fill_palette_332(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i = (r << 5) | (g << 2) | b; /* 8-bit palette index */
+
+ /* Expand R (3-bit) G (3-bit) and B (2-bit) values to 16-bit values */
+ r = (r << 13) | (r << 10) | (r << 7) | (r << 4) | (r << 1) | (r >> 2);
+ g = (g << 13) | (g << 10) | (g << 7) | (g << 4) | (g << 1) | (g >> 2);
+ b = (b << 14) | (b << 12) | (b << 10) | (b << 8) | (b << 6) | (b << 4) | (b << 2) | b;
+
+ set_palette(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_palette_332 - Programs a default palette for RGB332-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs an RGB332 palette to hardware.
+ */
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int r, g, b;
+
+ /* Limits of 8-8-4 are the maximum number of values for each channel. */
+ for (r = 0; r < 8; ++r) {
+ for (g = 0; g < 8; ++g) {
+ for (b = 0; b < 4; ++b)
+ fill_palette_332(crtc, r, g, b, set_palette);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_332);
+
+static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
+ drm_crtc_set_lut_func set_palette)
+{
+ u16 Y = (i << 8) | i; // relative luminance
+
+ set_palette(crtc, i, Y, Y, Y);
+}
+
+/**
+ * drm_crtc_fill_palette_8 - Programs a default palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs a default palette to hardware.
+ */
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_palette_8(crtc, i, set_palette);
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_8);
+
+/**
+ * drm_color_lut32_check - check validity of extended lookup table
+ * @lut: property blob containing extended LUT to check
+ * @tests: bitmask of tests to run
+ *
+ * Helper to check whether a userspace-provided extended lookup table is valid and
+ * satisfies hardware requirements. Drivers pass a bitmask indicating which of
+ * the tests in &drm_color_lut_tests should be performed.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests)
+{
+ const struct drm_color_lut32 *entry;
+ int i;
+
+ if (!lut || !tests)
+ return 0;
+
+ entry = lut->data;
+ for (i = 0; i < drm_color_lut32_size(lut); i++) {
+ if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
+ if (entry[i].red != entry[i].blue ||
+ entry[i].red != entry[i].green) {
+ DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
+ return -EINVAL;
+ }
+ }
+
+ if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
+ if (entry[i].red < entry[i - 1].red ||
+ entry[i].green < entry[i - 1].green ||
+ entry[i].blue < entry[i - 1].blue) {
+ DRM_DEBUG_KMS("LUT entries must never decrease.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_color_lut32_check);
diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c
new file mode 100644
index 000000000000..44eb823585d2
--- /dev/null
+++ b/drivers/gpu/drm/drm_colorop.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_colorop.h>
+#include <drm/drm_print.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it
+ * should use the COLOR_PIPELINE plane property and associated colorops
+ * for any color operation on the &drm_plane. Setting of all old color
+ * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected
+ * and the values of the properties will be ignored.
+ *
+ * Colorops are only advertised and valid for atomic drivers and atomic
+ * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
+ * client cap.
+ *
+ * A colorop represents a single color operation. Colorops are chained
+ * via the NEXT property and make up color pipelines. Color pipelines
+ * are advertised and selected via the COLOR_PIPELINE &drm_plane
+ * property.
+ *
+ * A colorop will be of a certain type, advertised by the read-only TYPE
+ * property. Each type of colorop will advertise a different set of
+ * properties and is programmed in a different manner. Types can be
+ * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the
+ * &drm_colorop_type documentation for information on each type.
+ *
+ * If a colorop advertises the BYPASS property it can be bypassed.
+ *
+ * Information about colorop and color pipeline design decisions can be
+ * found at rfc/color_pipeline.rst, but note that this document will
+ * grow stale over time.
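+ *
+ * As a rough illustration only (not code from any particular driver), a
+ * plane driver could build a two-element pipeline with the init helpers in
+ * this file and chain the colorops via their NEXT property, with op_tf and
+ * op_cm being driver-allocated &struct drm_colorop objects:
+ *
+ *   drm_plane_colorop_curve_1d_init(dev, op_tf, plane,
+ *                                   BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF), 0);
+ *   drm_plane_colorop_ctm_3x4_init(dev, op_cm, plane, 0);
+ *   drm_colorop_set_next_property(op_tf, op_cm);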
+ */
+
+static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = {
+ { DRM_COLOROP_1D_CURVE, "1D Curve" },
+ { DRM_COLOROP_1D_LUT, "1D LUT" },
+ { DRM_COLOROP_CTM_3X4, "3x4 Matrix" },
+ { DRM_COLOROP_MULTIPLIER, "Multiplier" },
+ { DRM_COLOROP_3D_LUT, "3D LUT" },
+};
+
+static const char * const colorop_curve_1d_type_names[] = {
+ [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF",
+ [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF",
+ [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF",
+ [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF",
+ [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF",
+ [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF",
+ [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2",
+ [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse",
+};
+
+static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = {
+ { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" },
+};
+
+static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = {
+ { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" },
+};
+
+/* Init Helpers */
+
+static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, enum drm_colorop_type type,
+ uint32_t flags)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property *prop;
+ int ret = 0;
+
+ ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP);
+ if (ret)
+ return ret;
+
+ colorop->base.properties = &colorop->properties;
+ colorop->dev = dev;
+ colorop->type = type;
+ colorop->plane = plane;
+ colorop->next = NULL;
+
+ list_add_tail(&colorop->head, &config->colorop_list);
+ colorop->index = config->num_colorop++;
+
+ /* add properties */
+
+ /* type */
+ prop = drm_property_create_enum(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "TYPE", drm_colorop_type_enum_list,
+ ARRAY_SIZE(drm_colorop_type_enum_list));
+
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->type_property = prop;
+
+ drm_object_attach_property(&colorop->base,
+ colorop->type_property,
+ colorop->type);
+
+ if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) {
+ /* bypass */
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "BYPASS");
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->bypass_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->bypass_property,
+ 1);
+ }
+
+ /* next */
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "NEXT", DRM_MODE_OBJECT_COLOROP);
+ if (!prop)
+ return -ENOMEM;
+ colorop->next_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->next_property,
+ 0);
+
+ return ret;
+}
+
+/**
+ * drm_colorop_cleanup - Cleanup a drm_colorop object in color_pipeline
+ *
+ * @colorop: The drm_colorop object to be cleaned
+ */
+void drm_colorop_cleanup(struct drm_colorop *colorop)
+{
+ struct drm_device *dev = colorop->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ list_del(&colorop->head);
+ config->num_colorop--;
+
+ if (colorop->state && colorop->state->data) {
+ drm_property_blob_put(colorop->state->data);
+ colorop->state->data = NULL;
+ }
+
+ kfree(colorop->state);
+}
+EXPORT_SYMBOL(drm_colorop_cleanup);
+
+/**
+ * drm_colorop_pipeline_destroy - Helper for color pipeline destruction
+ *
+ * @dev: The drm_device containing the drm_planes with the color_pipelines
+ *
+ * Provides a default color pipeline destroy handler for drm_device.
+ */
+void drm_colorop_pipeline_destroy(struct drm_device *dev)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_colorop *colorop, *next;
+
+ list_for_each_entry_safe(colorop, next, &config->colorop_list, head) {
+ drm_colorop_cleanup(colorop);
+ kfree(colorop);
+ }
+}
+EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
+
+/**
+ * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @supported_tfs: A bitfield of supported &enum drm_colorop_curve_1d_type values,
+ * created using BIT(curve_type) and combined with the OR '|'
+ * operator.
+ * @flags: bitmask of miscellaneous flags; see DRM_COLOROP_FLAG_* defines
+ *
+ * Returns: zero on success, or a negative error code on failure
+ */
+int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, u64 supported_tfs, uint32_t flags)
+{
+ struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT];
+ int i, len;
+
+ struct drm_property *prop;
+ int ret;
+
+ if (!supported_tfs) {
+ drm_err(dev,
+ "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) {
+ drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags);
+ if (ret)
+ return ret;
+
+ len = 0;
+ for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) {
+ if ((supported_tfs & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = colorop_curve_1d_type_names[i];
+ len++;
+ }
+
+ if (WARN_ON(len <= 0))
+ return -EINVAL;
+
+ /* initialize 1D curve only attribute */
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE",
+ enum_list, len);
+
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->curve_1d_type_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property,
+ enum_list[0].type);
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init);
+
+static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop)
+{
+ struct drm_property *prop;
+
+ /* data */
+ prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "DATA", 0);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->data_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->data_property,
+ 0);
+
+ return 0;
+}
+
+/**
+ * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @lut_size: LUT size supported by driver
+ * @interpolation: 1D LUT interpolation type
+ * @flags: bitmask of miscellaneous flags; see DRM_COLOROP_FLAG_* defines
+ *
+ * Returns: zero on success, or a negative error code on failure
+ */
+int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t lut_size,
+ enum drm_colorop_lut1d_interpolation_type interpolation,
+ uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags);
+ if (ret)
+ return ret;
+
+ /* initialize 1D LUT only attribute */
+ /* LUT size */
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->size_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
+ colorop->size = lut_size;
+
+ /* interpolation */
+ prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION",
+ drm_colorop_lut1d_interpolation_list,
+ ARRAY_SIZE(drm_colorop_lut1d_interpolation_list));
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->lut1d_interpolation_property = prop;
+ drm_object_attach_property(&colorop->base, prop, interpolation);
+ colorop->lut1d_interpolation = interpolation;
+
+ /* data */
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init);
+
+int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags)
+{
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags);
+ if (ret)
+ return ret;
+
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init);
+
+/**
+ * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @flags: bitmask of miscellaneous flags; see DRM_COLOROP_FLAG_* defines
+ *
+ * Returns: zero on success, or a negative error code on failure
+ */
+int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->multiplier_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0);
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_mult_init);
+
+int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane,
+ uint32_t lut_size,
+ enum drm_colorop_lut3d_interpolation_type interpolation,
+ uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags);
+ if (ret)
+ return ret;
+
+ /* LUT size */
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->size_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
+ colorop->size = lut_size;
+
+ /* interpolation */
+ prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION",
+ drm_colorop_lut3d_interpolation_list,
+ ARRAY_SIZE(drm_colorop_lut3d_interpolation_list));
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->lut3d_interpolation_property = prop;
+ drm_object_attach_property(&colorop->base, prop, interpolation);
+ colorop->lut3d_interpolation = interpolation;
+
+ /* data */
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_3dlut_init);
+
+static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state)
+{
+ memcpy(state, colorop->state, sizeof(*state));
+
+ if (state->data)
+ drm_property_blob_get(state->data);
+
+ state->bypass = true;
+}
+
+struct drm_colorop_state *
+drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop)
+{
+ struct drm_colorop_state *state;
+
+ if (WARN_ON(!colorop->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_colorop_duplicate_state(colorop, state);
+
+ return state;
+}
+
+void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state)
+{
+ kfree(state);
+}
+
+/**
+ * __drm_colorop_state_reset - resets colorop state to default values
+ * @colorop_state: atomic colorop state, must not be NULL
+ * @colorop: colorop object, must not be NULL
+ *
+ * Initializes the newly allocated @colorop_state with default
+ * values. This is useful for drivers that subclass the colorop state.
+ */
+static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state,
+ struct drm_colorop *colorop)
+{
+ u64 val;
+
+ colorop_state->colorop = colorop;
+ colorop_state->bypass = true;
+
+ if (colorop->curve_1d_type_property) {
+ drm_object_property_get_default_value(&colorop->base,
+ colorop->curve_1d_type_property,
+ &val);
+ colorop_state->curve_1d_type = val;
+ }
+}
+
+/**
+ * __drm_colorop_reset - reset state on colorop
+ * @colorop: drm colorop
+ * @colorop_state: colorop state to assign
+ *
+ * Initializes the newly allocated @colorop_state and assigns it to
+ * the &drm_crtc->state pointer of @colorop, usually required when
+ * initializing the drivers or when called from the &drm_colorop_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the colorop state.
+ */
+static void __drm_colorop_reset(struct drm_colorop *colorop,
+ struct drm_colorop_state *colorop_state)
+{
+ if (colorop_state)
+ __drm_colorop_state_reset(colorop_state, colorop);
+
+ colorop->state = colorop_state;
+}
+
+void drm_colorop_reset(struct drm_colorop *colorop)
+{
+ kfree(colorop->state);
+ colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL);
+
+ if (colorop->state)
+ __drm_colorop_reset(colorop, colorop->state);
+}
+
+static const char * const colorop_type_name[] = {
+ [DRM_COLOROP_1D_CURVE] = "1D Curve",
+ [DRM_COLOROP_1D_LUT] = "1D LUT",
+ [DRM_COLOROP_CTM_3X4] = "3x4 Matrix",
+ [DRM_COLOROP_MULTIPLIER] = "Multiplier",
+ [DRM_COLOROP_3D_LUT] = "3D LUT",
+};
+
+static const char * const colorop_lut3d_interpolation_name[] = {
+ [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral",
+};
+
+static const char * const colorop_lut1d_interpolation_name[] = {
+ [DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear",
+};
+
+const char *drm_get_colorop_type_name(enum drm_colorop_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name)))
+ return "unknown";
+
+ return colorop_type_name[type];
+}
+
+const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names)))
+ return "unknown";
+
+ return colorop_curve_1d_type_names[type];
+}
+
+/**
+ * drm_get_colorop_lut1d_interpolation_name: return a string for interpolation type
+ * @type: interpolation type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name)))
+ return "unknown";
+
+ return colorop_lut1d_interpolation_name[type];
+}
+
+/**
+ * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type
+ * @type: interpolation type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_lu3d_interpolation_name)))
+ return "unknown";
+
+ return colorop_lu3d_interpolation_name[type];
+}
+
+/**
+ * drm_colorop_set_next_property - sets the next pointer
+ * @colorop: drm colorop
+ * @next: next colorop
+ *
+ * Should be used when constructing the color pipeline.
+ */
+void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next)
+{
+ drm_object_property_set_value(&colorop->base,
+ colorop->next_property,
+ next ? next->base.id : 0);
+ colorop->next = next;
+}
+EXPORT_SYMBOL(drm_colorop_set_next_property);
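/*
 * Editor's illustrative sketch (not part of this patch): a driver building a
 * color pipeline would chain its colorops with drm_colorop_set_next_property()
 * and terminate the chain with NULL. The function name below is hypothetical.
 */
static void example_link_pipeline(struct drm_colorop *ops, int count)
{
	int i;

	for (i = 0; i < count - 1; i++)
		drm_colorop_set_next_property(&ops[i], &ops[i + 1]);

	/* the last colorop points at nothing, terminating the pipeline */
	drm_colorop_set_next_property(&ops[count - 1], NULL);
}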
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1383fa9fff9b..4d6dc9ebfdb5 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -279,6 +280,7 @@ static int drm_connector_init_only(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->cec.mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
@@ -592,6 +594,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
@@ -699,6 +704,46 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_cec_phys_addr_invalidate - invalidate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Invalidate the CEC physical address set for this DRM connector.
+ */
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector)
+{
+ mutex_lock(&connector->cec.mutex);
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_invalidate)
+ connector->cec.funcs->phys_addr_invalidate(connector);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_invalidate);
+
+/**
+ * drm_connector_cec_phys_addr_set - propagate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Propagate CEC physical address from the display_info to this DRM connector.
+ */
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector)
+{
+ u16 addr;
+
+ mutex_lock(&connector->cec.mutex);
+
+ addr = connector->display_info.source_physical_address;
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_set)
+ connector->cec.funcs->phys_addr_set(connector, addr);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_set);
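/*
 * Editor's illustrative sketch (not part of this patch): a connector driver
 * would typically call these helpers from its hotplug handling, propagating
 * the physical address after a new EDID has been parsed and invalidating it
 * on disconnect. The function name below is hypothetical.
 */
static void example_cec_hotplug(struct drm_connector *connector, bool connected)
{
	if (connected)
		drm_connector_cec_phys_addr_set(connector);
	else
		drm_connector_cec_phys_addr_invalidate(connector);
}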
+
+/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
@@ -1424,6 +1469,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
+ * On atomic drivers any DPMS setproperty ioctl where the value does not
+ * change is completely skipped, otherwise a full atomic commit will occur.
+ * On legacy drivers the exact behavior is driver specific.
+ *
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
@@ -1638,7 +1687,7 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* structure from userspace. This is received as blob and stored in
* &drm_connector_state.hdr_output_metadata. It parses EDID and saves the
* sink metadata in &struct hdr_sink_metadata, as
- * &drm_connector.hdr_sink_metadata. Driver uses
+ * &drm_connector.display_info.hdr_sink_metadata. Driver uses
* drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR metadata,
* hdmi_drm_infoframe_pack() to pack the infoframe as per spec, in case of
* HDMI encoder.
@@ -3390,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
* properties reflect the latest status.
*/
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3488ff067c69..a7797d260f1e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
+ * SHARPNESS_STRENGTH:
+ * Atomic property for setting the sharpness strength/intensity by userspace.
+ *
+ * The value of this property is set as an integer value ranging
+ * from 0 - 255 where:
+ *
+ * 0: Sharpness feature is disabled (default value).
+ *
+ * 1: Minimum sharpness.
+ *
+ * 255: Maximum sharpness.
+ *
+ * Users can gradually increase or decrease the sharpness level and set
+ * the optimum value depending on the content.
+ * This value is passed to the kernel through the UAPI.
+ * Setting this property does not require a modeset.
+ * The sharpness effect takes place post blending on the final composed output.
+ * If the feature is disabled, the content remains unchanged; when enabled,
+ * the sharpening enhances the clarity of the content.
*/
__printf(6, 0)
@@ -939,3 +958,39 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
return 0;
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property *prop =
+ drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255);
+
+ if (!prop)
+ return -ENOMEM;
+
+ crtc->sharpness_strength_property = prop;
+ drm_object_attach_property(&crtc->base, prop, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property);
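/*
 * Editor's illustrative sketch (not part of this patch): a driver that
 * supports post-blend sharpening attaches the property while initializing its
 * CRTC. The function name below is hypothetical.
 */
static int example_crtc_init_sharpness(struct drm_crtc *crtc)
{
	/* attaches SHARPNESS_STRENGTH with a default value of 0 (disabled) */
	return drm_crtc_create_sharpness_strength_property(crtc);
}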
+
+/**
+ * drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
+ *
+ * @crtc_state: CRTC state to check
+ *
+ * This function determines if the given CRTC state is being cloned by multiple
+ * encoders.
+ *
+ * RETURNS:
+ * True if the CRTC state is in clone mode. False otherwise
+ */
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state)
+{
+ if (!crtc_state)
+ return false;
+
+ return hweight32(crtc_state->encoder_mask) > 1;
+}
+EXPORT_SYMBOL(drm_crtc_in_clone_mode);
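/*
 * Editor's illustrative sketch (not part of this patch): a driver can use the
 * helper in its atomic checks to reject configurations it cannot drive when
 * one CRTC feeds multiple encoders. Both function names are hypothetical.
 */
static int example_check_cloning(struct drm_crtc_state *crtc_state)
{
	if (drm_crtc_in_clone_mode(crtc_state) &&
	    !example_hw_supports_cloning(crtc_state->crtc))
		return -EINVAL;

	return 0;
}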
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 8059f65c5d6c..bae73936acf9 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -43,7 +43,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 89706aa8232f..c09409229644 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index afb02aae707b..6a49e7a0ab84 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -30,6 +30,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -308,7 +310,7 @@ EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
* True if there is valid plane damage otherwise false.
*/
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect)
{
struct drm_atomic_helper_damage_iter iter;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 536409a35df4..365cf337529f 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -44,6 +44,9 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
+static struct dentry *accel_debugfs_root;
+static struct dentry *drm_debugfs_root;
+
/***************************************************
* Initialization, etc.
**************************************************/
@@ -77,14 +80,15 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s %*s\n",
+ "%20s %5s %3s master a %5s %10s %*s %20s\n",
"command",
"tgid",
"dev",
"uid",
"magic",
DRM_CLIENT_NAME_MAX_LEN,
- "name");
+ "name",
+ "id");
/* dev->filelist is sorted youngest first, but we want to present
 * oldest first (i.e. kernel, servers, clients), so walk backwards.
@@ -100,7 +104,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s %20llu\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
@@ -109,7 +113,8 @@ static int drm_clients_info(struct seq_file *m, void *data)
from_kuid_munged(seq_user_ns(m), uid),
priv->magic,
DRM_CLIENT_NAME_MAX_LEN,
- priv->client_name ? priv->client_name : "<unset>");
+ priv->client_name ? priv->client_name : "<unset>",
+ priv->client_id);
rcu_read_unlock();
mutex_unlock(&priv->client_name_lock);
}
@@ -285,16 +290,120 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
+void drm_debugfs_bridge_params(void)
+{
+ drm_bridge_debugfs_params(drm_debugfs_root);
+}
+
+void drm_debugfs_init_root(void)
+{
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ accel_debugfs_root = debugfs_create_dir("accel", NULL);
+#endif
+}
+
+void drm_debugfs_remove_root(void)
+{
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ debugfs_remove(accel_debugfs_root);
+#endif
+ debugfs_remove(drm_debugfs_root);
+}
+
+static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused)
+{
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_file *file = m->private;
+
+ if (!file)
+ return -EINVAL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ seq_printf(m, "pid: %d\n", task ? task->pid : 0);
+ seq_printf(m, "comm: %s\n", task ? task->comm : "Unset");
+ rcu_read_unlock();
+ return 0;
+}
+
+static int drm_debufs_proc_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, drm_debugfs_proc_info_show, inode->i_private);
+}
+
+static const struct file_operations drm_debugfs_proc_info_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debufs_proc_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * drm_debugfs_clients_add - Add a per client debugfs directory
+ * @file: drm_file for a client
+ *
+ * Create the debugfs directory for each client. This will be used to populate
+ * driver specific data for each client.
+ *
+ * Also add the process information debugfs file for each client to tag
+ * which client belongs to which process.
+ */
+void drm_debugfs_clients_add(struct drm_file *file)
+{
+ char *client;
+
+ client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id);
+ if (!client)
+ return;
+
+ /* Create a debugfs directory for the client in root on drm debugfs */
+ file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root);
+ kfree(client);
+
+ debugfs_create_file("proc_info", 0444, file->debugfs_client, file,
+ &drm_debugfs_proc_info_fops);
+
+ client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique);
+ if (!client)
+ return;
+
+ /* Create a link from client_id to the drm device this client id belongs to */
+ debugfs_create_symlink("device", file->debugfs_client, client);
+ kfree(client);
+}
+
+/**
+ * drm_debugfs_clients_remove - removes all debugfs directories and files
+ * @file: drm_file for a client
+ *
+ * Removes the debugfs directories recursively from the client directory.
+ *
+ * Note that debugfs files may still be open while the drm_file is released.
+ */
+void drm_debugfs_clients_remove(struct drm_file *file)
+{
+ debugfs_remove_recursive(file->debugfs_client);
+ file->debugfs_client = NULL;
+}
+
/**
* drm_debugfs_dev_init - create debugfs directory for the device
* @dev: the device which we want to create the directory for
- * @root: the parent directory depending on the device type
*
* Creates the debugfs directory for the device under the given root directory.
*/
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+void drm_debugfs_dev_init(struct drm_device *dev)
{
- dev->debugfs_root = debugfs_create_dir(dev->unique, root);
+ if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root);
+ else
+ dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root);
}
/**
@@ -321,14 +430,13 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_atomic_debugfs_init(dev);
}
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
struct drm_device *dev = minor->dev;
char name[64];
sprintf(name, "%d", minor_id);
- minor->debugfs_symlink = debugfs_create_symlink(name, root,
+ minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root,
dev->unique);
/* TODO: Only for compatibility with drivers */
@@ -740,40 +848,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +863,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index bbc3bc4ba844..6b43b1cf2327 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,6 +29,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index b4fd43783c50..58d0bb6d2676 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -9,6 +9,34 @@
#include "drm_crtc_internal.h"
#include "drm_displayid_internal.h"
+enum {
+ QUIRK_IGNORE_CHECKSUM,
+};
+
+struct displayid_quirk {
+ const struct drm_edid_ident ident;
+ u8 quirks;
+};
+
+static const struct displayid_quirk quirks[] = {
+ {
+ .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"),
+ .quirks = BIT(QUIRK_IGNORE_CHECKSUM),
+ },
+};
+
+static u8 get_quirks(const struct drm_edid *drm_edid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(quirks); i++) {
+ if (drm_edid_match(drm_edid, &quirks[i].ident))
+ return quirks[i].quirks;
+ }
+
+ return 0;
+}
+
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
@@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index)
}
static const struct displayid_header *
-validate_displayid(const u8 *displayid, int length, int idx)
+validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum)
{
int i, dispid_length;
u8 csum = 0;
@@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx)
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
- DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
- return ERR_PTR(-EINVAL);
+ DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum,
+ ignore_checksum ? " (ignoring)" : "");
+
+ if (!ignore_checksum)
+ return ERR_PTR(-EINVAL);
}
return base;
}
-static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
- int *length, int *idx,
- int *ext_index)
+static const u8 *find_next_displayid_extension(struct displayid_iter *iter)
{
const struct displayid_header *base;
const u8 *displayid;
+ bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM);
- displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index);
+ displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index);
if (!displayid)
return NULL;
/* EDID extensions block checksum isn't for us */
- *length = EDID_LENGTH - 1;
- *idx = 1;
+ iter->length = EDID_LENGTH - 1;
+ iter->idx = 1;
- base = validate_displayid(displayid, *length, *idx);
+ base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum);
if (IS_ERR(base))
return NULL;
- *length = *idx + sizeof(*base) + base->bytes;
+ iter->length = iter->idx + sizeof(*base) + base->bytes;
return displayid;
}
@@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
memset(iter, 0, sizeof(*iter));
iter->drm_edid = drm_edid;
+ iter->quirks = get_quirks(drm_edid);
}
static const struct displayid_block *
@@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter)
/* The first section we encounter is the base section */
bool base_section = !iter->section;
- iter->section = drm_find_displayid_extension(iter->drm_edid,
- &iter->length,
- &iter->idx,
- &iter->ext_index);
+ iter->section = find_next_displayid_extension(iter);
if (!iter->section) {
iter->drm_edid = NULL;
return NULL;
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index aee1b86a73c1..5b1b32f73516 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -66,6 +66,7 @@ struct drm_edid;
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
@@ -114,20 +115,32 @@ struct displayid_tiled_block {
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
+ __le16 hactive;
+ __le16 hblank;
+ __le16 hsync;
+ __le16 hsw;
+ __le16 vactive;
+ __le16 vblank;
+ __le16 vsync;
+ __le16 vsw;
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+ u8 flags;
+ __le16 hactive;
+ __le16 vactive;
+ u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+ struct displayid_block base;
+ struct displayid_formula_timings_9 timings[];
+} __packed;
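/*
 * Editor's illustrative sketch (not part of this patch): with the timing
 * fields declared as __le16, consumers read them through le16_to_cpu()
 * instead of assembling the two bytes by hand. Any additional decoding of the
 * raw value is left out here; the function name is hypothetical.
 */
static u16 example_read_hactive(const struct displayid_detailed_timings_1 *t)
{
	return le16_to_cpu(t->hactive);
}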
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
@@ -154,6 +167,8 @@ struct displayid_iter {
u8 version;
u8 primary_use;
+
+ u8 quirks;
};
void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index cb2ad12bce57..5b956229c82f 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -5,91 +5,15 @@
*/
#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/types.h>
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -104,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
@@ -203,7 +127,7 @@ EXPORT_SYMBOL(drm_draw_fill16);
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color)
+ u32 color)
{
unsigned int y, x;
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
index f121ee7339dc..20cb404e23ea 100644
--- a/drivers/gpu/drm/drm_draw_internal.h
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color);
+ u32 color);
void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c2c172eb25df..2915118436ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,17 +26,23 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/bitops.h>
+#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
+#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/sprintf.h>
#include <linux/srcu.h>
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -66,8 +72,6 @@ DEFINE_XARRAY_ALLOC(drm_minors_xa);
*/
static bool drm_core_init_complete;
-static struct dentry *drm_debugfs_root;
-
DEFINE_STATIC_SRCU(drm_unplug_srcu);
/*
@@ -180,8 +184,7 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
return 0;
if (minor->type != DRM_MINOR_ACCEL) {
- ret = drm_debugfs_register(minor, minor->index,
- drm_debugfs_root);
+ ret = drm_debugfs_register(minor, minor->index);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_debugfs;
@@ -497,6 +500,105 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
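/*
 * Editor's illustrative sketch (not part of this patch): a driver whose DMA
 * transfers go through a device other than the DRM device's parent overrides
 * the DMA device right after allocating the DRM device. "example_driver" and
 * the function name are hypothetical.
 */
static int example_bind(struct device *parent, struct device *dma_parent)
{
	struct drm_device *drm;

	drm = drm_dev_alloc(&example_driver, parent);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	drm_dev_set_dma_dev(drm, dma_parent);

	return 0;
}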
+
+/*
+ * Available recovery methods for wedged device. To be sent along with device
+ * wedged uevent.
+ */
+static const char *drm_get_wedge_recovery(unsigned int opt)
+{
+ switch (BIT(opt)) {
+ case DRM_WEDGE_RECOVERY_NONE:
+ return "none";
+ case DRM_WEDGE_RECOVERY_REBIND:
+ return "rebind";
+ case DRM_WEDGE_RECOVERY_BUS_RESET:
+ return "bus-reset";
+ case DRM_WEDGE_RECOVERY_VENDOR:
+ return "vendor-specific";
+ default:
+ return NULL;
+ }
+}
+
+#define WEDGE_STR_LEN 32
+#define PID_STR_LEN 15
+#define COMM_STR_LEN (TASK_COMM_LEN + 5)
+
+/**
+ * drm_dev_wedged_event - generate a device wedged uevent
+ * @dev: DRM device
+ * @method: method(s) to be used for recovery
+ * @info: optional information about the guilty task
+ *
+ * This generates a device wedged uevent for the DRM device specified by @dev.
+ * Recovery @method\(s) of choice will be sent in the uevent environment as
+ * ``WEDGED=<method1>[,..,<methodN>]`` in order of least to most side effects.
+ * If the caller is unsure about recovery or @method is unknown (0),
+ * ``WEDGED=unknown`` will be sent instead.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info)
+{
+ char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
+ char *envp[] = { event_string, NULL, NULL, NULL };
+ const char *recovery = NULL;
+ unsigned int len, opt;
+
+ len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
+
+ for_each_set_bit(opt, &method, BITS_PER_TYPE(method)) {
+ recovery = drm_get_wedge_recovery(opt);
+ if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
+ break;
+
+ len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
+ }
+
+ if (recovery)
+ /* Get rid of trailing comma */
+ event_string[len - 1] = '\0';
+ else
+ /* Caller is unsure about recovery, do the best we can at this point. */
+ snprintf(event_string, sizeof(event_string), "%s", "WEDGED=unknown");
+
+ drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
+ "but recovered through reset" : "needs recovery");
+
+ if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
+ snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
+ snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
+ envp[1] = pid_string;
+ envp[2] = comm_string;
+ }
+
+ return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_dev_wedged_event);
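/*
 * Editor's illustrative sketch (not part of this patch): after an
 * unrecoverable hang a driver reports the device as wedged, listing the
 * recovery methods it considers viable; passing NULL omits the guilty-task
 * information. The function name is hypothetical.
 */
static void example_report_wedged(struct drm_device *dev)
{
	drm_dev_wedged_event(dev,
			     DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
			     NULL);
}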
+
/*
* DRM internal mount
* We want to be able to allocate our own "struct address_space" to control
@@ -585,6 +687,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -592,7 +696,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
}
static int drm_dev_init(struct drm_device *dev,
@@ -630,10 +733,10 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
+ INIT_LIST_HEAD(&dev->client_sysrq_list);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
@@ -682,10 +785,7 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
- accel_debugfs_init(dev);
- else
- drm_debugfs_dev_init(dev, drm_debugfs_root);
+ drm_debugfs_dev_init(dev);
return 0;
@@ -739,36 +839,62 @@ void *__devm_drm_dev_alloc(struct device *parent,
EXPORT_SYMBOL(__devm_drm_dev_alloc);
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * __drm_dev_alloc - Allocation of a &drm_device instance
* @parent: Parent device object
+ * @driver: DRM driver
+ * @size: the size of the struct which contains struct drm_device
+ * @offset: the offset of the &drm_device within the container.
*
- * This is the deprecated version of devm_drm_dev_alloc(), which does not support
- * subclassing through embedding the struct &drm_device in a driver private
- * structure, and which does not support automatic cleanup through devres.
+ * This should *NOT* be used by any drivers, but is a dedicated interface for the
+ * corresponding Rust abstraction.
*
- * RETURNS:
- * Pointer to new DRM device, or ERR_PTR on failure.
+ * This is the same as devm_drm_dev_alloc(), but without the corresponding
+ * resource management through the parent device. It is not the same as
+ * drm_dev_alloc() either, since the latter is the deprecated version, which
+ * does not support subclassing.
+ *
+ * Returns: A pointer to new DRM device, or an ERR_PTR on failure.
*/
-struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
- struct device *parent)
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset)
{
- struct drm_device *dev;
+ void *container;
+ struct drm_device *drm;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
return ERR_PTR(-ENOMEM);
- ret = drm_dev_init(dev, driver, parent);
+ drm = container + offset;
+ ret = drm_dev_init(drm, driver, parent);
if (ret) {
- kfree(dev);
+ kfree(container);
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(drm, container);
- drmm_add_final_kfree(dev, dev);
+ return container;
+}
+EXPORT_SYMBOL(__drm_dev_alloc);
- return dev;
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+ struct device *parent)
+{
+ return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -820,6 +946,37 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
+static void drmm_cg_unregister_region(struct drm_device *dev, void *arg)
+{
+ dmem_cgroup_unregister_region(arg);
+}
+
+/**
+ * drmm_cgroup_register_region - Register a region of a DRM device to cgroups
+ * @dev: device for region
+ * @region_name: Region name for registering
+ * @size: Size of region in bytes
+ *
+ * Registers a dmem cgroup region named "drm/<device>/<region_name>" for @dev.
+ * A DRM-managed action unregisters the region again when the device is
+ * released.
+ */
+struct dmem_cgroup_region *drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size)
+{
+ struct dmem_cgroup_region *region;
+ int ret;
+
+ region = dmem_cgroup_register_region(size, "drm/%s/%s", dev->unique, region_name);
+ if (IS_ERR_OR_NULL(region))
+ return region;
+
+ ret = drmm_add_action_or_reset(dev, drmm_cg_unregister_region, region);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(drmm_cgroup_register_region);
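/*
 * Editor's illustrative sketch (not part of this patch): registering a device
 * memory region with the dmem cgroup controller; the DRM-managed action added
 * by the helper unregisters it when the device is released. The region name
 * and function name are hypothetical.
 */
static int example_register_vram(struct drm_device *dev, u64 vram_size)
{
	struct dmem_cgroup_region *region;

	region = drmm_cgroup_register_region(dev, "vram0", vram_size);
	if (IS_ERR(region))
		return PTR_ERR(region);

	/*
	 * Based on the IS_ERR_OR_NULL check above, region may presumably be
	 * NULL when the dmem controller is not available; treat that as a
	 * non-fatal condition here.
	 */
	return 0;
}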
+
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
@@ -944,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_unload;
}
drm_panic_register(dev);
+ drm_client_sysrq_register(dev);
DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -988,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev)
{
dev->registered = false;
+ drm_client_sysrq_unregister(dev);
drm_panic_unregister(dev);
drm_client_dev_unregister(dev);
@@ -1068,7 +1227,7 @@ static void drm_core_exit(void)
drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
- debugfs_remove(drm_debugfs_root);
+ drm_debugfs_remove_root();
drm_sysfs_destroy();
WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
@@ -1087,7 +1246,8 @@ static int __init drm_core_init(void)
goto error;
}
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_debugfs_init_root();
+ drm_debugfs_bridge_params();
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 70032bba1c97..e2b62e5fb891 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -25,8 +25,11 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -57,6 +60,134 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
+static int drm_mode_align_dumb(struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u32 pitch = args->pitch;
+ u32 size;
+
+ if (!pitch)
+ return -EINVAL;
+
+ if (hw_pitch_align)
+ pitch = roundup(pitch, hw_pitch_align);
+
+ if (!hw_size_align)
+ hw_size_align = PAGE_SIZE;
+ else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE))
+ return -EINVAL; /* TODO: handle this if necessary */
+
+ if (check_mul_overflow(args->height, pitch, &size))
+ return -EINVAL;
+ size = ALIGN(size, hw_size_align);
+ if (!size)
+ return -EINVAL;
+
+ args->pitch = pitch;
+ args->size = size;
+
+ return 0;
+}
+
+/**
+ * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers
+ * @dev: DRM device
+ * @args: Parameters for the dumb buffer
+ * @hw_pitch_align: Hardware scanline alignment in bytes
+ * @hw_size_align: Hardware buffer-size alignment in bytes
+ *
+ * The helper drm_mode_size_dumb() calculates the size of the buffer
+ * allocation and the scanline size for a dumb buffer. Callers have to
+ * set the buffer's width, height and color mode in the argument @args.
+ * The helper validates the correctness of the input and tests for
+ * possible overflows. If successful, it returns the dumb buffer's
+ * required scanline pitch and size in @args.
+ *
+ * The parameter @hw_pitch_align allows the driver to specify an
+ * alignment for the scanline pitch, if the hardware requires any. The
+ * calculated pitch will be a multiple of the alignment. The parameter
+ * @hw_size_align allows the driver to specify an alignment for buffer
+ * sizes. The provided alignment should represent requirements of the
+ * graphics hardware. drm_mode_size_dumb() handles GEM-related constraints
+ * automatically across all drivers and hardware. For example, the
+ * returned buffer size is always a multiple of PAGE_SIZE, which is
+ * required by mmap().
+ *
+ * Returns:
+ * Zero on success, or a negative error code otherwise.
+ */
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u64 pitch = 0;
+ u32 fourcc;
+
+ /*
+ * The scanline pitch depends on the buffer width and the color
+ * format. The latter is specified as a color-mode constant for
+ * which we first have to find the corresponding color format.
+ *
+ * Different color formats can have the same color-mode constant.
+ * For example XRGB8888 and BGRX8888 both have a color mode of 32.
+ * It is possible to use different formats for dumb-buffer allocation
+ * and rendering as long as all involved formats share the same
+ * color-mode constant.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info = drm_format_info(fourcc);
+
+ if (!info)
+ return -EINVAL;
+ pitch = drm_format_info_min_pitch(info, 0, args->width);
+ } else if (args->bpp) {
+ /*
+ * Some userspace throws in arbitrary values for bpp and
+ * relies on the kernel to figure it out. In this case we
+ * fall back to the old method of using bpp directly. The
+ * over-commitment of memory from the rounding is acceptable
+ * for compatibility with legacy userspace. We have a number
+ * of deprecated legacy values that are explicitly supported.
+ */
+ switch (args->bpp) {
+ default:
+ drm_warn_once(dev,
+ "Unknown color mode %u; guessing buffer size.\n",
+ args->bpp);
+ fallthrough;
+ /*
+ * These constants represent various YUV formats supported by
+ * drm_gem_afbc_get_bpp().
+ */
+ case 12: // DRM_FORMAT_YUV420_8BIT
+ case 15: // DRM_FORMAT_YUV420_10BIT
+ case 30: // DRM_FORMAT_VUY101010
+ fallthrough;
+ /*
+ * Used by Mesa and Gstreamer to allocate NV formats and others
+ * as RGB buffers. Technically, XRGB16161616F formats are RGB,
+ * but the dumb buffers are not supposed to be used for anything
+ * beyond 32 bits per pixel.
+ */
+ case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010
+ case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F
+ pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8);
+ break;
+ }
+ }
+
+ if (!pitch || pitch > U32_MAX)
+ return -EINVAL;
+
+ args->pitch = pitch;
+
+ return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align);
+}
+EXPORT_SYMBOL(drm_mode_size_dumb);
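/*
 * Editor's illustrative sketch (not part of this patch): a driver's
 * .dumb_create implementation lets drm_mode_size_dumb() compute pitch and
 * size before allocating backing storage; here with a 64-byte scanline
 * alignment and the default (page-sized) buffer alignment. The function name
 * and the alignment value are hypothetical.
 */
static int example_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 64, 0);
	if (ret)
		return ret;

	/* allocate a GEM object of args->size and store its handle in args->handle */
	return 0;
}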
+
int drm_mode_create_dumb(struct drm_device *dev,
struct drm_mode_create_dumb *args,
struct drm_file *file_priv)
@@ -99,7 +230,30 @@ int drm_mode_create_dumb(struct drm_device *dev,
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- return drm_mode_create_dumb(dev, data, file_priv);
+ struct drm_mode_create_dumb *args = data;
+ int err;
+
+ err = drm_mode_create_dumb(dev, args, file_priv);
+ if (err) {
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+ }
+ return err;
+}
+
+static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args,
+ struct drm_file *file_priv)
+{
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+
+ if (dev->driver->dumb_map_offset)
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+ else
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
/**
@@ -120,17 +274,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
+ int err;
- if (!dev->driver->dumb_create)
- return -ENOSYS;
-
- if (dev->driver->dumb_map_offset)
- return dev->driver->dumb_map_offset(file_priv, dev,
- args->handle,
- &args->offset);
- else
- return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ err = drm_mode_mmap_dumb(dev, args, file_priv);
+ if (err)
+ args->offset = 0;
+ return err;
}
int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..26bb7710a462 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -31,6 +31,7 @@
#include <linux/bitfield.h>
#include <linux/byteorder/generic.h>
#include <linux/cec.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -66,34 +67,36 @@ static int oui(u8 first, u8 second, u8 third)
* on as many displays as possible).
*/
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* Force reduced-blanking timings for detailed modes */
-#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
-/* Force 8bpc */
-#define EDID_QUIRK_FORCE_8BPC (1 << 8)
-/* Force 12bpc */
-#define EDID_QUIRK_FORCE_12BPC (1 << 9)
-/* Force 6bpc */
-#define EDID_QUIRK_FORCE_6BPC (1 << 10)
-/* Force 10bpc */
-#define EDID_QUIRK_FORCE_10BPC (1 << 11)
-/* Non desktop display (i.e. HMD) */
-#define EDID_QUIRK_NON_DESKTOP (1 << 12)
-/* Cap the DSC target bitrate to 15bpp */
-#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
+enum drm_edid_internal_quirk {
+ /* First detailed mode wrong, use largest 60Hz mode */
+ EDID_QUIRK_PREFER_LARGE_60 = DRM_EDID_QUIRK_NUM,
+ /* Reported 135MHz pixel clock is too high, needs adjustment */
+ EDID_QUIRK_135_CLOCK_TOO_HIGH,
+ /* Prefer the largest mode at 75 Hz */
+ EDID_QUIRK_PREFER_LARGE_75,
+ /* Detail timing is in cm not mm */
+ EDID_QUIRK_DETAILED_IN_CM,
+ /* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+ EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE,
+ /* use +hsync +vsync for detailed mode */
+ EDID_QUIRK_DETAILED_SYNC_PP,
+ /* Force reduced-blanking timings for detailed modes */
+ EDID_QUIRK_FORCE_REDUCED_BLANKING,
+ /* Force 8bpc */
+ EDID_QUIRK_FORCE_8BPC,
+ /* Force 12bpc */
+ EDID_QUIRK_FORCE_12BPC,
+ /* Force 6bpc */
+ EDID_QUIRK_FORCE_6BPC,
+ /* Force 10bpc */
+ EDID_QUIRK_FORCE_10BPC,
+ /* Non desktop display (i.e. HMD) */
+ EDID_QUIRK_NON_DESKTOP,
+ /* Cap the DSC target bitrate to 15bpp */
+ EDID_QUIRK_CAP_DSC_15BPP,
+};
#define MICROSOFT_IEEE_OUI 0xca125c
@@ -128,124 +131,135 @@ static const struct edid_quirk {
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'C', 'R', 44358, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Acer F51 */
- EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'P', 'I', 0x7602, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('A', 'E', 'O', 0, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BenQ GW2765 */
- EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, BIT(EDID_QUIRK_FORCE_8BPC)),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x78b, BIT(EDID_QUIRK_FORCE_6BPC)),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('C', 'P', 'T', 0x17df, BIT(EDID_QUIRK_FORCE_6BPC)),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 0x3652, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x0771, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Belinea 10 15 55 */
- EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 1516, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Envision Peripherals, Inc. EN-7100e */
- EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
+ EDID_QUIRK('E', 'P', 'I', 59264, BIT(EDID_QUIRK_135_CLOCK_TOO_HIGH)),
/* Envision EN2028 */
- EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('E', 'P', 'I', 8232, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Funai Electronics PM36B */
- EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM),
+ EDID_QUIRK('F', 'C', 'M', 13600, BIT(EDID_QUIRK_PREFER_LARGE_75) |
+ BIT(EDID_QUIRK_DETAILED_IN_CM)),
/* LG 27GP950 */
- EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5bbf, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LG 27GN950 */
- EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5b9a, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+ EDID_QUIRK('L', 'G', 'D', 764, BIT(EDID_QUIRK_FORCE_10BPC)),
/* LG Philips LCD LP154W01-A5 */
- EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
- EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
/* Samsung SyncMaster 205BW. Note: irony */
- EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
+ EDID_QUIRK('S', 'A', 'M', 541, BIT(EDID_QUIRK_DETAILED_SYNC_PP)),
/* Samsung SyncMaster 22[5-6]BW */
- EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 596, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('S', 'A', 'M', 638, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, BIT(EDID_QUIRK_FORCE_12BPC)),
/* ViewSonic VA2026w */
- EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
+ EDID_QUIRK('V', 'S', 'C', 5020, BIT(EDID_QUIRK_FORCE_REDUCED_BLANKING)),
/* Medion MD 30217 PG */
- EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, BIT(EDID_QUIRK_PREFER_LARGE_75)),
/* Lenovo G50 */
- EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 18514, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('S', 'E', 'C', 0xd033, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('E', 'T', 'R', 13896, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Valve Index Headset */
- EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, BIT(EDID_QUIRK_NON_DESKTOP)),
/* HTC Vive and Vive Pro VR Headsets */
- EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0001, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Windows Mixed Reality Headsets */
- EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sony PlayStation VR Headset */
- EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sensics VR Headsets */
- EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'N', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
/* OSVR HDK and HDK2 VR Headsets */
- EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+
+ /* LQ116M1JW10 displays noise when 8 bpc, but display fine as 6 bpc */
+ EDID_QUIRK('S', 'H', 'P', 0x154c, BIT(EDID_QUIRK_FORCE_6BPC)),
+
+ /*
+ * @drm_edid_internal_quirk entries end here, followed by the
+ * @drm_edid_quirk entries.
+ */
+
+ /* HP ZR24w DP AUX DPCD access requires probing to prevent corruption. */
+ EDID_QUIRK('H', 'W', 'P', 0x2869, BIT(DRM_EDID_QUIRK_DP_DPCD_PROBE)),
};
/*
@@ -2951,6 +2965,18 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
return 0;
}
+static bool drm_edid_has_internal_quirk(struct drm_connector *connector,
+ enum drm_edid_internal_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+EXPORT_SYMBOL(drm_edid_has_quirk);
+
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
@@ -2960,7 +2986,6 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
*/
static void edid_fixup_preferred(struct drm_connector *connector)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2968,9 +2993,9 @@ static void edid_fixup_preferred(struct drm_connector *connector)
if (list_empty(&connector->probed_modes))
return;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60))
target_refresh = 60;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3474,7 +3499,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3508,7 +3532,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_REDUCED_BLANKING)) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3520,7 +3544,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_135_CLOCK_TOO_HIGH))
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3551,7 +3575,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_SYNC_PP)) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
@@ -3564,12 +3588,12 @@ set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_IN_CM)) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -5373,7 +5397,8 @@ static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
static void drm_calculate_luminance_range(struct drm_connector *connector)
{
- struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ const struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
struct drm_luminance_range_info *luminance_range =
&connector->display_info.luminance_range;
static const u8 pre_computed_values[] = {
@@ -5434,21 +5459,21 @@ static uint8_t hdr_metadata_type(const u8 *edid_ext)
static void
drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
{
+ struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
u16 len;
len = cea_db_payload_len(db);
- connector->hdr_sink_metadata.hdmi_type1.eotf =
- eotf_supported(db);
- connector->hdr_sink_metadata.hdmi_type1.metadata_type =
- hdr_metadata_type(db);
+ hdr_metadata->eotf = eotf_supported(db);
+ hdr_metadata->metadata_type = hdr_metadata_type(db);
if (len >= 4)
- connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
+ hdr_metadata->max_cll = db[4];
if (len >= 5)
- connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
+ hdr_metadata->max_fall = db[5];
if (len >= 6) {
- connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+ hdr_metadata->min_cll = db[6];
/* Calculate only when all values are available */
drm_calculate_luminance_range(connector);
@@ -6596,6 +6621,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ memset(&info->hdr_sink_metadata, 0, sizeof(info->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6733,26 +6759,26 @@ static void update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_NON_DESKTOP)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_CAP_DSC_15BPP))
info->max_dsc_bpp = 15;
- if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_6BPC))
info->bpc = 6;
- if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_8BPC))
info->bpc = 8;
- if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_10BPC))
info->bpc = 10;
- if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_12BPC))
info->bpc = 12;
/* Depends on info->cea_rev set by drm_parse_cea_ext() above */
@@ -6760,23 +6786,23 @@ out:
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings,
+ const struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
- unsigned pixel_clock = (timings->pixel_clock[0] |
- (timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16)) + 1;
- unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
- unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
- unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
- unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
- unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
- unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
- unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
- unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
- bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
- bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ unsigned int pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
+ unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
+ unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
+ unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
+ unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
+ unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
+ unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
+ unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
+ bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
+ bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
mode = drm_mode_create(dev);
if (!mode)
@@ -6833,6 +6859,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
return num_modes;
}
+static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
+ const struct displayid_formula_timings_9 *timings,
+ bool type_10)
+{
+ struct drm_display_mode *mode;
+ u16 hactive = le16_to_cpu(timings->hactive) + 1;
+ u16 vactive = le16_to_cpu(timings->vactive) + 1;
+ u8 timing_formula = timings->flags & 0x7;
+
+ /* TODO: support RB-v2 & RB-v3 */
+ if (timing_formula > 1)
+ return NULL;
+
+ /* TODO: support video-optimized refresh rate */
+ if (timings->flags & (1 << 4))
+ drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate");
+
+ mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
+ if (!mode)
+ return NULL;
+
+ /* TODO: interpret S3D flags */
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_formula_modes(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ const struct displayid_formula_timing_block *formula_block = (struct displayid_formula_timing_block *)block;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
+ int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
+
+ /* extended blocks are not supported yet */
+ if (timing_size != 6)
+ return 0;
+
+ if (block->num_bytes % timing_size)
+ return 0;
+
+ num_timings = block->num_bytes / timing_size;
+ for (int i = 0; i < num_timings; i++) {
+ const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
+
+ newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6845,6 +6931,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
+ else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
+ num_modes += add_displayid_formula_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -6854,7 +6943,6 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
static int _drm_edid_connector_add_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
if (!drm_edid)
@@ -6884,7 +6972,8 @@ static int _drm_edid_connector_add_modes(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60) ||
+ drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector);
return num_modes;
@@ -7099,18 +7188,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
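Note on the drm_edid.c changes above: drm_edid_has_quirk() is now exported so that code outside the EDID parser can test the public quirk bits. A minimal, hedged sketch of a caller, assuming only the helper and the DRM_EDID_QUIRK_DP_DPCD_PROBE value added above; the wrapper function itself is hypothetical, and quirks are only valid once the connector's EDID has been parsed.

	#include <drm/drm_connector.h>
	#include <drm/drm_edid.h>

	/* Hypothetical wrapper; only drm_edid_has_quirk() and the quirk value
	 * come from the change above.
	 */
	static bool sink_needs_dpcd_probe(struct drm_connector *connector)
	{
		return drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE);
	}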
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 18e366cc4993..8d0601400182 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -2,7 +2,9 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
+
#include <linux/dma-resv.h>
+#include <linux/export.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index e1d61a65210b..fd71969d2fb1 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -17,7 +17,9 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/module.h>
/**
@@ -178,7 +180,7 @@ int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Buffer should be accessible from the CPU */
- if (dma_obj->base.import_attach)
+ if (drm_gem_is_imported(&dma_obj->base))
return -ENODEV;
/* Buffer should be already mapped to CPU */
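The open-coded import_attach test above is replaced by drm_gem_is_imported(). A hedged sketch of the same guard in driver code; the surrounding function is made up:

	#include <linux/errno.h>
	#include <drm/drm_gem.h>

	/* Hypothetical example; drm_gem_is_imported() is the real helper used above. */
	static int my_cpu_access_begin(struct drm_gem_object *obj)
	{
		/* Imported dma-bufs are not guaranteed to be CPU-accessible here. */
		if (drm_gem_is_imported(obj))
			return -ENODEV;
		return 0;
	}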
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index c9008113111b..4a7f72044ab8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,9 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
-#include <linux/pci.h>
-#include <linux/sysrq.h>
-#include <linux/vga_switcheroo.h>
+#include <linux/export.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
@@ -245,12 +243,16 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper->funcs->fb_restore)
+ fb_helper->funcs->fb_restore(fb_helper);
+
return ret;
}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
+ * @force: ignore present DRM master
*
* This helper should be called from fbdev emulation's &drm_client_funcs.restore
* callback. It ensures that the user isn't greeted with a black screen when the
@@ -259,48 +261,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force)
{
- return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
-#ifdef CONFIG_MAGIC_SYSRQ
-/* emergency restore, don't bother with error reporting */
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
- struct drm_fb_helper *helper;
-
- mutex_lock(&kernel_fb_helper_lock);
- list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- struct drm_device *dev = helper->dev;
-
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- continue;
-
- mutex_lock(&helper->lock);
- drm_client_modeset_commit_locked(&helper->client);
- mutex_unlock(&helper->lock);
- }
- mutex_unlock(&kernel_fb_helper_lock);
-}
-
-static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-
-static void drm_fb_helper_sysrq(u8 dummy1)
-{
- schedule_work(&drm_fb_helper_restore_work);
-}
-
-static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
- .handler = drm_fb_helper_sysrq,
- .help_msg = "force-fb(v)",
- .action_msg = "Restore framebuffer console",
-};
-#else
-static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
-#endif
-
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -364,6 +330,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
unsigned long flags;
int ret;
+ mutex_lock(&helper->lock);
+ drm_client_modeset_wait_for_vblank(&helper->client, 0);
+ mutex_unlock(&helper->lock);
+
if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
return;
@@ -487,20 +457,7 @@ int drm_fb_helper_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_fb_helper_init);
-/**
- * drm_fb_helper_alloc_info - allocate fb_info and some of its members
- * @fb_helper: driver-allocated fbdev helper
- *
- * A helper to alloc fb_info and the member cmap. Called by the driver
- * within the struct &drm_driver.fbdev_probe callback function. Drivers do
- * not need to release the allocated fb_info structure themselves, this is
- * automatically done when calling drm_fb_helper_fini().
- *
- * RETURNS:
- * fb_info pointer if things went okay, pointer containing error code
- * otherwise
- */
-struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
+static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
@@ -527,17 +484,8 @@ err_release:
framebuffer_release(info);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(drm_fb_helper_alloc_info);
-/**
- * drm_fb_helper_release_info - release fb_info and its members
- * @fb_helper: driver-allocated fbdev helper
- *
- * A helper to release fb_info and the member cmap. Drivers do not
- * need to release the allocated fb_info structure themselves, this is
- * automatically done when calling drm_fb_helper_fini().
- */
-void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
+static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
{
struct fb_info *info = fb_helper->info;
@@ -550,7 +498,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
-EXPORT_SYMBOL(drm_fb_helper_release_info);
/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
@@ -562,11 +509,6 @@ EXPORT_SYMBOL(drm_fb_helper_release_info);
*/
void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
- struct fb_info *info = fb_helper->info;
- struct device *dev = info->device;
-
- if (dev_is_pci(dev))
- vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL);
unregister_framebuffer(fb_helper->info);
}
EXPORT_SYMBOL(drm_fb_helper_unregister_info);
@@ -593,11 +535,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
- if (!list_empty(&fb_helper->kernel_fb_list)) {
+ if (!list_empty(&fb_helper->kernel_fb_list))
list_del(&fb_helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list))
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
mutex_unlock(&kernel_fb_helper_lock);
if (!fb_helper->client.funcs)
@@ -754,7 +693,12 @@ EXPORT_SYMBOL(drm_fb_helper_deferred_io);
*/
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
- if (fb_helper && fb_helper->info)
+ if (!fb_helper || !fb_helper->info)
+ return;
+
+ if (fb_helper->funcs->fb_set_suspend)
+ fb_helper->funcs->fb_set_suspend(fb_helper, suspend);
+ else
fb_set_suspend(fb_helper->info, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
@@ -800,7 +744,7 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
}
- fb_set_suspend(fb_helper->info, suspend);
+ drm_fb_helper_set_suspend(fb_helper, suspend);
console_unlock();
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
@@ -1059,15 +1003,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&fb_helper->lock);
- if (!drm_master_internal_acquire(dev)) {
- ret = -EBUSY;
- goto unlock;
- }
+ guard(mutex)(&fb_helper->lock);
switch (cmd) {
case FBIO_WAITFORVSYNC:
@@ -1087,28 +1025,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
* make. If we're not smart enough here, one should
* just consider switch the userspace to KMS.
*/
- crtc = fb_helper->client.modesets[0].crtc;
-
- /*
- * Only wait for a vblank event if the CRTC is
- * enabled, otherwise just don't do anythintg,
- * not even report an error.
- */
- ret = drm_crtc_vblank_get(crtc);
- if (!ret) {
- drm_crtc_wait_one_vblank(crtc);
- drm_crtc_vblank_put(crtc);
- }
-
- ret = 0;
+ ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0);
break;
default:
ret = -ENOTTY;
}
- drm_master_internal_release(dev);
-unlock:
- mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
@@ -1337,9 +1259,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
* the KDSET IOCTL with KD_TEXT, and only after that drops the master
* status when exiting.
*
- * In the past this was caught by drm_fb_helper_lastclose(), but on
- * modern systems where logind always keeps a drm fd open to orchestrate
- * the vt switching, this doesn't work.
+ * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(),
+ * but on modern systems where logind always keeps a drm fd open to
+ * orchestrate the vt switching, this doesn't work.
*
* To not break the userspace ABI we have this special case here, which
* is only used for the above case. Everything else uses the normal
@@ -1354,14 +1276,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
+static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
{
struct drm_mode_set *mode_set;
mutex_lock(&fb_helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &fb_helper->client) {
- mode_set->x = x;
- mode_set->y = y;
+ mode_set->x += dx;
+ mode_set->y += dy;
}
mutex_unlock(&fb_helper->client.modeset_mutex);
}
@@ -1370,16 +1292,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- int ret;
+ int ret, dx, dy;
- pan_set(fb_helper, var->xoffset, var->yoffset);
+ dx = var->xoffset - info->var.xoffset;
+ dy = var->yoffset - info->var.yoffset;
+ pan_set(fb_helper, dx, dy);
ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
} else
- pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
+ pan_set(fb_helper, -dx, -dy);
return ret;
}
@@ -1621,9 +1545,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_surface_size sizes;
- struct fb_info *info;
int ret;
+ if (drm_WARN_ON(dev, !dev->driver->fbdev_probe))
+ return -EINVAL;
+
ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
if (ret) {
/* First time: disable all crtc's.. */
@@ -1633,21 +1559,12 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
}
/* push down into drivers */
- if (dev->driver->fbdev_probe)
- ret = dev->driver->fbdev_probe(fb_helper, &sizes);
- else if (fb_helper->funcs)
- ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
+ ret = dev->driver->fbdev_probe(fb_helper, &sizes);
if (ret < 0)
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
- info = fb_helper->info;
-
- /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
- if (dev_is_pci(info->device))
- vga_switcheroo_client_fb_set(to_pci_dev(info->device), info);
-
return 0;
}
@@ -1816,6 +1733,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
@@ -1824,13 +1746,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&fb_helper->lock);
- return ret;
+ goto err_drm_fb_helper_release_info;
}
drm_setup_crtcs_fb(fb_helper);
fb_helper->deferred_setup = false;
- info = fb_helper->info;
info->var.pixclock = 0;
/* Need to drop locks to avoid recursive deadlock in
@@ -1846,13 +1767,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info->node, info->fix.id);
mutex_lock(&kernel_fb_helper_lock);
- if (list_empty(&kernel_fb_helper_list))
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
mutex_unlock(&kernel_fb_helper_lock);
return 0;
+
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
+ return ret;
}
/**
@@ -1962,16 +1884,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
-
-/**
- * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
- * @dev: DRM device
- *
- * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked()
- * instead.
- */
-void drm_fb_helper_lastclose(struct drm_device *dev)
-{
- drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
-}
-EXPORT_SYMBOL(drm_fb_helper_lastclose);
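The helper core above now invokes optional fb_restore and fb_set_suspend callbacks from struct drm_fb_helper_funcs, and drm_fb_helper_restore_fbdev_mode_unlocked() takes an explicit force argument. A hedged sketch of how an fbdev emulation might populate the new hooks; the my_fbdev_* names are placeholders:

	#include <linux/fb.h>
	#include <drm/drm_fb_helper.h>

	/* Placeholder implementations; only the callback names fb_restore and
	 * fb_set_suspend are taken from the change above.
	 */
	static void my_fbdev_restore(struct drm_fb_helper *fb_helper)
	{
		/* e.g. re-upload the shadow buffer after the display mode was restored */
	}

	static void my_fbdev_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
	{
		fb_set_suspend(fb_helper->info, suspend);
	}

	static const struct drm_fb_helper_funcs my_fbdev_helper_funcs = {
		.fb_restore = my_fbdev_restore,
		.fb_set_suspend = my_fbdev_set_suspend,
	};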
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b14b581c059d..9412d9fdd74b 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -8,6 +10,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
/*
* struct fb_ops
@@ -53,10 +56,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_dma_fb_ops = {
@@ -70,37 +71,100 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+ void *shadow = info->screen_buffer;
+
+ if (!fb_helper->dev)
+ return;
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(fb_helper);
+ vfree(shadow);
- return fb_deferred_io_mmap(info, vma);
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
}
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+ FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
- .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map dst;
+
+ /*
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ dst = buffer->map;
+ drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+ mutex_unlock(&fb_helper->lock);
+
+ return 0;
+}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +176,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
+ ret = drm_fbdev_dma_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,16 +196,82 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct fb_info *info = fb_helper->info;
+ struct iosys_map map = buffer->map;
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct fb_info *info = fb_helper->info;
+ size_t screen_size = buffer->gem->size;
+ void *screen_buffer;
+ int ret;
+
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus create
+ * a shadow buffer in system memory.
+ */
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer)
+ return -ENOMEM;
+
+ info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
+
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_vfree;
+
+ return 0;
+
+err_vfree:
+ vfree(screen_buffer);
+ return ret;
+}
+
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- bool use_deferred_io = false;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
- struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
- struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@@ -148,23 +282,13 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- /*
- * Deferred I/O requires struct page for framebuffer memory,
- * which is not guaranteed for all DMA ranges. We thus only
- * install deferred I/O if we have a framebuffer that requires
- * it.
- */
- if (fb->funcs->dirty)
- use_deferred_io = true;
-
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -177,64 +301,23 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_client_buffer_vunmap;
- }
-
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (use_deferred_io)
- info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ if (fb->funcs->dirty)
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
- info->fbops = &drm_fbdev_dma_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB; /* system memory */
- if (dma_obj->map_noncoherent)
- info->flags |= FBINFO_READS_FAST; /* signal caching */
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->screen_buffer = map.vaddr;
- if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
- if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
- }
- info->fix.smem_len = info->screen_size;
-
- /*
- * Only set up deferred I/O if the screen buffer supports
- * it. If this disagrees with the previous test for ->dirty,
- * mmap on the /dev/fb file might not work correctly.
- */
- if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
- unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
- if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
- use_deferred_io = false;
- }
-
- /* deferred I/O */
- if (use_deferred_io) {
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
- }
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+ if (ret)
+ goto err_drm_client_buffer_vunmap;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
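With the fb_info allocation moved into the helper core, drivers only provide the fbdev_probe hook. A hedged sketch of wiring the DMA-backed implementation into struct drm_driver; all other fields are elided:

	#include <drm/drm_drv.h>
	#include <drm/drm_fbdev_dma.h>

	/* Sketch only; the remaining drm_driver fields are omitted. */
	static const struct drm_driver my_dma_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fbdev_probe = drm_fbdev_dma_driver_fbdev_probe,
		/* ... name, fops, dumb_create, ... */
	};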
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index f824369baacd..458c899b5d4f 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <drm/drm_drv.h>
@@ -8,6 +9,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
/*
* struct fb_ops
@@ -62,10 +64,8 @@ static void drm_fbdev_shmem_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_shmem_fb_ops = {
@@ -135,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct drm_gem_shmem_object *shmem;
struct drm_framebuffer *fb;
- struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@@ -148,7 +148,7 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_bpp);
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -168,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_client_buffer_vunmap;
- }
-
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_shmem_fb_ops;
@@ -194,18 +188,16 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
- goto err_drm_fb_helper_release_info;
+ goto err_drm_client_buffer_vunmap;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 73d35d59590c..160bc35d8738 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@@ -49,11 +50,9 @@ static void drm_fbdev_ttm_fb_destroy(struct fb_info *info)
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_ttm_fb_ops = {
@@ -175,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
- struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
@@ -188,7 +187,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -201,13 +200,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
screen_buffer = vzalloc(screen_size);
if (!screen_buffer) {
ret = -ENOMEM;
- goto err_drm_client_framebuffer_delete;
- }
-
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
+ goto err_drm_client_buffer_delete;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
@@ -226,18 +219,16 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
- goto err_drm_fb_helper_release_info;
+ goto err_vfree;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
-err_drm_client_framebuffer_delete:
+err_drm_client_buffer_delete:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 2289e71e2fa2..be5e617ceb9f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -33,6 +33,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,6 +46,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -167,6 +169,9 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
drm_prime_init_file_private(&file->prime);
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_add(file);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
if (ret < 0)
@@ -181,6 +186,10 @@ out_prime_destroy:
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
+
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
put_pid(rcu_access_pointer(file->pid));
kfree(file);
@@ -235,6 +244,9 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -393,7 +405,7 @@ EXPORT_SYMBOL(drm_open);
static void drm_lastclose(struct drm_device *dev)
{
- drm_client_dev_restore(dev);
+ drm_client_dev_restore(dev, false);
if (dev_is_pci(dev->dev))
vga_switcheroo_process_delayed_switch();
@@ -830,8 +842,11 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
}
EXPORT_SYMBOL(drm_send_event);
-static void print_size(struct drm_printer *p, const char *stat,
- const char *region, u64 sz)
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz)
{
const char *units[] = {"", " KiB", " MiB"};
unsigned u;
@@ -842,8 +857,10 @@ static void print_size(struct drm_printer *p, const char *stat,
sz = div_u64(sz, SZ_1K);
}
- drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
+ drm_printf(p, "%s-%s-%s:\t%llu%s\n",
+ prefix, stat, region, sz, units[u]);
}
+EXPORT_SYMBOL(drm_fdinfo_print_size);
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
{
@@ -868,17 +885,22 @@ void drm_print_memory_stats(struct drm_printer *p,
enum drm_gem_object_status supported_status,
const char *region)
{
- print_size(p, "total", region, stats->private + stats->shared);
- print_size(p, "shared", region, stats->shared);
+ const char *prefix = "drm";
+
+ drm_fdinfo_print_size(p, prefix, "total", region,
+ stats->private + stats->shared);
+ drm_fdinfo_print_size(p, prefix, "shared", region, stats->shared);
if (supported_status & DRM_GEM_OBJECT_ACTIVE)
- print_size(p, "active", region, stats->active);
+ drm_fdinfo_print_size(p, prefix, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
- print_size(p, "resident", region, stats->resident);
+ drm_fdinfo_print_size(p, prefix, "resident", region,
+ stats->resident);
if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
- print_size(p, "purgeable", region, stats->purgeable);
+ drm_fdinfo_print_size(p, prefix, "purgeable", region,
+ stats->purgeable);
}
EXPORT_SYMBOL(drm_print_memory_stats);
@@ -954,6 +976,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -973,10 +999,48 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);
/**
+ * drm_file_err - log process name, pid and client_name associated with a drm_file
+ * @file_priv: context of interest for process name and pid
+ * @fmt: printf() like format string
+ *
+ * Helper function for clients that need to log process details such
+ * as the process name and pid along with user logs.
+ */
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ mutex_lock(&file_priv->client_name_lock);
+ rcu_read_lock();
+ pid = rcu_dereference(file_priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ drm_err(dev, "comm: %s pid: %d client-id:%llu client: %s ... %pV",
+ task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_id,
+ file_priv->client_name ?: "Unset", &vaf);
+
+ va_end(args);
+ rcu_read_unlock();
+ mutex_unlock(&file_priv->client_name_lock);
+}
+EXPORT_SYMBOL(drm_file_err);
+
+/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
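The new drm_file_err() above logs the owning process name, pid and client name together with the driver's message. A hedged usage sketch; the timeout handler and its arguments are invented:

	#include <linux/types.h>
	#include <drm/drm_file.h>

	/* Hypothetical caller; drm_file_err() is the helper added above. */
	static void my_report_job_timeout(struct drm_file *file_priv, u64 seqno)
	{
		drm_file_err(file_priv, "job %llu timed out, resetting engine\n", seqno);
	}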
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 8c6090a90d56..f5889dd8e7aa 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,6 +21,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index b1be458ed4dd..6cddf05c493b 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,6 +8,7 @@
* (at your option) any later version.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -20,6 +21,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +247,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
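Each of these line helpers takes a per-pixel conversion callback from drm_format_internal.h. Judging from the open-coded math that the later hunks remove from drm_fb_xrgb8888_to_rgb565_line(), such a callback is a pure bit-shuffle; a hedged sketch of what drm_pixel_xrgb8888_to_rgb565() presumably looks like:

	#include <linux/types.h>

	/* Reconstructed from the removed open-coded conversion below; the actual
	 * helper lives in drm_format_internal.h and may differ in detail.
	 */
	static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
	{
		return ((pix & 0x00f80000) >> 8) |	/* 5 MSBs of red */
		       ((pix & 0x0000fc00) >> 5) |	/* 6 MSBs of green */
		       ((pix & 0x000000f8) >> 3);	/* 5 MSBs of blue */
	}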
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +517,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +556,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
-}
-
-/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
-static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
- unsigned int pixels)
-{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
}
/**
@@ -460,7 +568,6 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @state: Transform and conversion state
- * @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -475,39 +582,60 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
- if (swab)
- xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
- else
- xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+static void drm_fb_xrgb8888_to_rgb565be_line(void *dbuf, const void *sbuf,
+ unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565be);
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565be - Convert XRGB8888 to RGB565|DRM_FORMAT_BIG_ENDIAN clip buffer
+ * @dst: Array of RGB565BE destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565BE devices that don't support XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565be_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565be);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +675,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +715,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +755,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -702,18 +793,49 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
+}
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+/**
+ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer
+ * @dst: Array of BGR888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGR888 devices that don't natively
+ * support XRGB8888.
+ */
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 3,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgr888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
+
+static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -753,26 +875,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
+ * @dst: Array of ABGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -781,29 +913,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
+ * @dst: Array of XBGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -812,23 +955,53 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+
+static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
+ * @dst: Array of BGRX8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgrx8888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -869,21 +1042,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -924,19 +1083,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -978,90 +1125,64 @@ void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pit
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
+}
+
/**
- * drm_fb_blit - Copy parts of a framebuffer to display memory
- * @dst: Array of display-memory addresses to copy to
+ * drm_fb_argb8888_to_argb4444 - Convert ARGB8888 to ARGB4444 clip buffer
+ * @dst: Array of ARGB4444 destination buffers
* @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
* within @dst; can be NULL if scanlines are stored next to each other.
- * @dst_format: FOURCC code of the display's color format
- * @src: The framebuffer memory to copy from
- * @fb: The framebuffer to copy from
- * @clip: Clip rectangle area to copy
+ * @src: Array of ARGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
* @state: Transform and conversion state
*
- * This function copies parts of a framebuffer to display memory. If the
- * formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them during the process. The parameters @dst,
- * @dst_pitch and @src refer to arrays. Each array must have at least as many
- * entries as there are planes in @dst_format's format. Each entry stores the
- * value for the format's respective color plane at the same index.
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
*
* This function does not apply clipping on @dst (i.e. the destination is at the
* top-left corner).
*
- * Returns:
- * 0 on success, or
- * -EINVAL if the color-format conversion failed, or
- * a negative error code otherwise.
+ * Drivers can use this function for ARGB4444 devices that don't support
+ * ARGB8888 natively.
*/
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
- const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state)
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
- uint32_t fb_format = fb->format->format;
-
- if (fb_format == dst_format) {
- drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
- return 0;
- } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == DRM_FORMAT_XRGB8888) {
- if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB1555) {
- drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB1555) {
- drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGBA5551) {
- drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB8888) {
- drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XBGR8888) {
- drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ABGR8888) {
- drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB2101010) {
- drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- }
- }
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_argb8888_to_argb4444_line);
+}
+EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
+
+static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ u8 *dbuf8 = dbuf;
+ const u8 *sbuf8 = sbuf;
+ u8 px;
- drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
- &fb_format, &dst_format);
+ while (pixels) {
+ unsigned int i, bits = min(pixels, 4U);
+ u8 byte = 0;
- return -EINVAL;
+ for (i = 0; i < bits; i++, pixels--) {
+ byte >>= 2;
+ px = (*sbuf8++ * 3 + 127) / 255;
+ byte |= (px &= 0x03) << 6;
+ }
+ *dbuf8++ = byte;
+ }
}
-EXPORT_SYMBOL(drm_fb_blit);
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -1170,140 +1291,91 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
-static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
-{
- /* only handle formats with depth != 0 and alpha channel */
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_ABGR1555:
- return DRM_FORMAT_XBGR1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_BGRA5551:
- return DRM_FORMAT_BGRX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_ABGR8888:
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
- case DRM_FORMAT_BGRA8888:
- return DRM_FORMAT_BGRX8888;
- case DRM_FORMAT_ARGB2101010:
- return DRM_FORMAT_XRGB2101010;
- case DRM_FORMAT_ABGR2101010:
- return DRM_FORMAT_XBGR2101010;
- case DRM_FORMAT_RGBA1010102:
- return DRM_FORMAT_RGBX1010102;
- case DRM_FORMAT_BGRA1010102:
- return DRM_FORMAT_BGRX1010102;
- }
-
- return fourcc;
-}
-
-static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
-{
- const uint32_t *fourccs_end = fourccs + nfourccs;
-
- while (fourccs < fourccs_end) {
- if (*fourccs == fourcc)
- return true;
- ++fourccs;
- }
- return false;
-}
-
/**
- * drm_fb_build_fourcc_list - Filters a list of supported color formats against
- * the device's native formats
- * @dev: DRM device
- * @native_fourccs: 4CC codes of natively supported color formats
- * @native_nfourccs: The number of entries in @native_fourccs
- * @fourccs_out: Returns 4CC codes of supported color formats
- * @nfourccs_out: The number of available entries in @fourccs_out
- *
- * This function create a list of supported color format from natively
- * supported formats and additional emulated formats.
- * At a minimum, most userspace programs expect at least support for
- * XRGB8888 on the primary plane. Devices that have to emulate the
- * format, and possibly others, can use drm_fb_build_fourcc_list() to
- * create a list of supported color formats. The returned list can
- * be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Native formats with alpha channel
- * will be replaced by such without, as primary planes usually don't
- * support alpha. Other heuristics might be applied
- * to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list.
+ * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2
+ * @dst: Array of gray2 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper-left corner of the clip rectangle) is
+ * converted and stored in the two least significant bits of the first byte of the
+ * gray2 destination buffer. Callers that require the first pixel of a byte to sit
+ * at an x-coordinate that is a multiple of 4 (gray2 packs 4 pixels per byte) must
+ * supply a suitable clip rectangle themselves.
+ *
+ * DRM doesn't have native gray2 support. Drivers can use this function for
+ * gray2 devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
*
- * Returns:
- * The number of color-formats 4CC codes returned in @fourccs_out.
*/
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out)
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
- /*
- * XRGB8888 is the default fallback format for most of userspace
- * and it's currently the only format that should be emulated for
- * the primary plane. Only if there's ever another default fallback,
- * it should be added here.
- */
- static const uint32_t extra_fourccs[] = {
- DRM_FORMAT_XRGB8888,
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
};
- static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+ unsigned int linepixels = drm_rect_width(clip);
+ unsigned int lines = drm_rect_height(clip);
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int len_src32 = linepixels * cpp;
+ struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
+ unsigned int y;
+ u8 *gray2 = dst[0].vaddr, *gray8;
+ u32 *src32;
- u32 *fourccs = fourccs_out;
- const u32 *fourccs_end = fourccs_out + nfourccs_out;
- size_t i;
+ if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
/*
- * The device's native formats go first.
+ * The gray2 destination buffer contains 2 bits per pixel.
*/
-
- for (i = 0; i < native_nfourccs; ++i) {
- /*
- * Several DTs, boot loaders and firmware report native
- * alpha formats that are non-alpha formats instead. So
- * replace alpha formats by non-alpha formats.
- */
- u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 4);
/*
- * The extra formats, emulated by the driver, go second.
+ * The DMA memory is write-combined, so reads are uncached.
+ * Speed this up by fetching one line at a time.
+ *
+ * Also, format conversion from XR24 to gray2 is done line by
+ * line, with 8-bit grayscale as an intermediate step.
+ *
+ * Allocate a buffer that is used both for copying from the DMA
+ * memory and for storing the intermediate grayscale line pixels.
*/
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
+ if (!src32)
+ return;
- for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = extra_fourccs[i];
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate and native entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+ gray8 = (u8 *)src32 + len_src32;
- *fourccs = fourcc;
- ++fourccs;
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ src32 = memcpy(src32, vaddr, len_src32);
+ drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+ drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels);
+ vaddr += fb->pitches[0];
+ gray2 += dst_pitch_0;
}
-
- return fourccs - fourccs_out;
}
-EXPORT_SYMBOL(drm_fb_build_fourcc_list);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2);
+
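As a sizing aid, a small sketch (an assumption, not part of the patch) of how a caller could compute the gray2 destination pitch and buffer size; it only illustrates the 2-bpp packing of four pixels per byte:

/* Hypothetical helper; assumes <linux/math.h> and <drm/drm_rect.h>. */
static size_t example_gray2_size(const struct drm_rect *clip)
{
	/* 2 bits per pixel: 4 pixels packed into each destination byte */
	unsigned int pitch = DIV_ROUND_UP(drm_rect_width(clip), 4);

	return (size_t)pitch * drm_rect_height(clip);
}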
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..ce29dd05bcc5
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/swab.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other format can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bit, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (77 * r + 150 * g + 29 * b) / 256;
+}
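A quick numeric check (illustrative only, not part of the patch): the integer coefficients 77, 150 and 29 sum to 256, so pure white maps back to 255 and the quotient always fits in 8 bits.

/* Hypothetical example; demonstrates that white stays white. */
static inline u32 drm_pixel_r8_bt601_white_example(void)
{
	/* (77 + 150 + 29) * 0xff / 256 == 0xff */
	return drm_pixel_xrgb8888_to_r8_bt601(0x00ffffff);
}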
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565be(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
+{
+ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 8) |
+ ((pix & 0x0000ff00) << 8) |
+ ((pix & 0x000000ff) << 24);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
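The final OR replicates the two most significant bits of each 8-bit channel into the two new least significant bits, so channel values expand to the full 10-bit range. A worked example (illustrative only, not part of the patch):

/* Hypothetical example; 0xff per channel expands to 0x3ff, not 0x3fc. */
static inline u32 drm_pixel_xrgb2101010_white_example(void)
{
	return drm_pixel_xrgb8888_to_xrgb2101010(0x00ffffff); /* == 0x3fffffff */
}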
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversions from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
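Each channel keeps only its upper nibble. A worked example (illustrative only, not part of the patch):

/* Hypothetical example; A=0x80 R=0xff G=0x40 B=0x20 becomes 0x8f42. */
static inline u32 drm_pixel_argb4444_example(void)
{
	return drm_pixel_argb8888_to_argb4444(0x80ff4020);
}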
+
+#endif
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 3a94ca211f9c..e0d533611040 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -238,6 +238,14 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGB161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
+ { .format = DRM_FORMAT_BGR161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -346,6 +354,33 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S010, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S210, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S410, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S012, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S212, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S412, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S016, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S216, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S416, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
};
unsigned int i;
@@ -382,7 +417,8 @@ EXPORT_SYMBOL(drm_format_info);
/**
* drm_get_format_info - query information for a given framebuffer configuration
* @dev: DRM device
- * @mode_cmd: metadata from the userspace fb creation request
+ * @pixel_format: pixel format (DRM_FORMAT_*)
+ * @modifier: format modifier (DRM_FORMAT_MOD_*)
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
@@ -390,15 +426,16 @@ EXPORT_SYMBOL(drm_format_info);
*/
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+ u32 pixel_format, u64 modifier)
{
const struct drm_format_info *info = NULL;
if (dev->mode_config.funcs->get_format_info)
- info = dev->mode_config.funcs->get_format_info(mode_cmd);
+ info = dev->mode_config.funcs->get_format_info(pixel_format,
+ modifier);
if (!info)
- info = drm_format_info(mode_cmd->pixel_format);
+ info = drm_format_info(pixel_format);
return info;
}
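For reference, a hedged sketch (not part of the patch) of a caller updated to the new signature; the wrapper name is an assumption:

/* Hypothetical caller sketch for the changed drm_get_format_info() signature. */
static const struct drm_format_info *
example_lookup_info(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_get_format_info(dev, mode_cmd->pixel_format, mode_cmd->modifier[0]);
}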
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index b781601946db..18e753ade001 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -153,18 +153,11 @@ int drm_mode_addfb_ioctl(struct drm_device *dev,
}
static int framebuffer_check(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *r)
{
- const struct drm_format_info *info;
int i;
- /* check if the format is supported at all */
- if (!__drm_format_info(r->pixel_format)) {
- drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
- &r->pixel_format);
- return -EINVAL;
- }
-
if (r->width == 0) {
drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
@@ -175,9 +168,6 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
- /* now let the driver pick its own format info */
- info = drm_get_format_info(dev, r);
-
for (i = 0; i < info->num_planes; i++) {
unsigned int width = drm_format_info_plane_width(info, r->width, i);
unsigned int height = drm_format_info_plane_height(info, r->height, i);
@@ -272,6 +262,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
+ const struct drm_format_info *info;
struct drm_framebuffer *fb;
int ret;
@@ -297,11 +288,21 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- ret = framebuffer_check(dev, r);
+ /* check if the format is supported at all */
+ if (!__drm_format_info(r->pixel_format)) {
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* now let the driver pick its own format info */
+ info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]);
+
+ ret = framebuffer_check(dev, info, r);
if (ret)
return ERR_PTR(ret);
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r);
if (IS_ERR(fb)) {
drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
@@ -862,11 +863,23 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free);
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
+ unsigned int i;
int ret;
+ bool exists;
if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
return -EINVAL;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
+ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ if (fb->obj[i]) {
+ exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
+ if (exists)
+ fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ }
+ }
+
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
@@ -875,7 +888,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
if (ret)
- goto out;
+ goto err;
mutex_lock(&dev->mode_config.fb_lock);
dev->mode_config.num_fb++;
@@ -883,7 +896,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
mutex_unlock(&dev->mode_config.fb_lock);
drm_mode_object_register(dev, &fb->base);
-out:
+
+ return 0;
+
+err:
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+ drm_gem_object_handle_put_unlocked(fb->obj[i]);
+ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+ }
+ }
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
@@ -960,6 +982,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
+ unsigned int i;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+ drm_gem_object_handle_put_unlocked(fb->obj[i]);
+ }
mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
@@ -1020,7 +1048,7 @@ retry:
plane_state->crtc->base.id,
plane_state->crtc->name, fb->base.id);
- crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..efc79bbf3c73 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
@@ -100,10 +101,8 @@ drm_gem_init(struct drm_device *dev)
vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
GFP_KERNEL);
- if (!vma_offset_manager) {
- DRM_ERROR("out of memory\n");
+ if (!vma_offset_manager)
return -ENOMEM;
- }
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
@@ -186,6 +185,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ mutex_init(&obj->gpuva.lock);
dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -209,9 +209,50 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
WARN_ON(obj->dma_buf);
dma_resv_fini(&obj->_resv);
+ mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+ if (obj->handle_count++ == 0)
+ drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_unlocked() to
+ * release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ guard(mutex)(&dev->object_name_lock);
+
+ /*
+ * First ref taken during GEM object creation, if any. Some
+ * drivers set up internal framebuffers with GEM objects that
+ * do not have a GEM handle. Hence, this counter can be zero.
+ */
+ if (!obj->handle_count)
+ return false;
+
+ drm_gem_object_handle_get(obj);
+
+ return true;
+}
+
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -242,20 +283,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
}
}
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+ if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
return;
/*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before
+ * we checked for a name.
+ */
mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) {
@@ -279,10 +326,18 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
+ if (drm_WARN_ON(obj->dev, !data))
+ return 0;
+
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
+ mutex_lock(&file_priv->prime.lock);
+
drm_prime_remove_buf_handle(&file_priv->prime, id);
+
+ mutex_unlock(&file_priv->prime.lock);
+
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -348,7 +403,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return -ENOENT;
/* Don't allow imported objects to be mapped */
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
goto out;
}
@@ -389,8 +444,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
- if (obj->handle_count++ == 0)
- drm_gem_object_get(obj);
+
+ drm_gem_object_handle_get(obj);
/*
* Get the user-visible handle using idr. Preload and perform
@@ -399,7 +454,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
- ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+ ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
idr_preload_end();
@@ -420,6 +475,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
goto err_revoke;
}
+ /* mirrors drm_gem_handle_delete to avoid races */
+ spin_lock(&file_priv->table_lock);
+ obj = idr_replace(&file_priv->object_idr, obj, handle);
+ WARN_ON(obj != NULL);
+ spin_unlock(&file_priv->table_lock);
*handlep = handle;
return 0;
@@ -566,7 +626,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
- long i, j, npages;
+ unsigned long i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -590,7 +650,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
i = 0;
while (i < npages) {
- long nr;
+ unsigned long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
@@ -723,9 +783,9 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
- int ret;
- u32 *handles;
struct drm_gem_object **objs;
+ u32 *handles;
+ int ret;
if (!count)
return 0;
@@ -737,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
*objs_out = objs;
- handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
- goto out;
- }
+ handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
+ if (IS_ERR(handles))
+ return PTR_ERR(handles);
ret = objects_lookup(filp, handles, count, objs);
-out:
kvfree(handles);
return ret;
@@ -793,12 +844,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
- long ret;
+ struct drm_device *dev = filep->minor->dev;
struct drm_gem_object *obj;
+ long ret;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
@@ -815,14 +867,6 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
-/**
- * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Releases the handle to an mm object.
- */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -838,17 +882,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
- * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -888,17 +921,6 @@ err:
return ret;
}
-/**
- * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -933,6 +955,57 @@ err:
return ret;
}
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_change_handle *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -EOPNOTSUPP;
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->handle == args->new_handle)
+ return 0;
+
+ mutex_lock(&file_priv->prime.lock);
+
+ spin_lock(&file_priv->table_lock);
+ ret = idr_alloc(&file_priv->object_idr, obj,
+ args->new_handle, args->new_handle + 1, GFP_NOWAIT);
+ spin_unlock(&file_priv->table_lock);
+
+ if (ret < 0)
+ goto out_unlock;
+
+ if (obj->dma_buf) {
+ ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
+ if (ret < 0) {
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->new_handle);
+ spin_unlock(&file_priv->table_lock);
+ goto out_unlock;
+ }
+
+ drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
+ }
+
+ ret = 0;
+
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->handle);
+ spin_unlock(&file_priv->table_lock);
+
+out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
+
+ return ret;
+}
+
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
@@ -1178,45 +1251,13 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
- str_yes_no(obj->import_attach));
+ str_yes_no(drm_gem_is_imported(obj)));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->pin)
- return obj->funcs->pin(obj);
-
- return 0;
-}
-
-void drm_gem_unpin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->unpin)
- obj->funcs->unpin(obj);
-}
-
-int drm_gem_pin(struct drm_gem_object *obj)
-{
- int ret;
-
- dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_pin_locked(obj);
- dma_resv_unlock(obj->resv);
-
- return ret;
-}
-
-void drm_gem_unpin(struct drm_gem_object *obj)
-{
- dma_resv_lock(obj->resv, NULL);
- drm_gem_unpin_locked(obj);
- dma_resv_unlock(obj->resv);
-}
-
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1274,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1289,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1303,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
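To illustrate the rename (a sketch under the assumption of a driver-local helper, not part of the patch): drm_gem_vmap()/drm_gem_vunmap() now take the reservation lock themselves, while the _locked variants expect the caller to hold it.

/* Hypothetical usage sketch of the renamed helpers; assumes <linux/dma-resv.h> and <drm/drm_gem.h>. */
static int example_vmap_paths(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	/* Unlocked path: the helper acquires and releases obj->resv. */
	ret = drm_gem_vmap(obj, map);
	if (ret)
		return ret;
	drm_gem_vunmap(obj, map);

	/* Locked path: the caller holds the reservation lock. */
	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	if (!ret)
		drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}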
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1460,12 +1501,14 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
* @nr_to_scan: The number of pages to try to reclaim
* @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
+ * @ticket: Optional ww_acquire_ctx context to use for locking
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned int nr_to_scan,
unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj))
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket)
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
@@ -1498,17 +1541,20 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
*/
mutex_unlock(lru->lock);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
+
/*
* Note that this still needs to be trylock, since we can
* hit shrinker in response to trying to get backing pages
* for this obj (ie. while it's lock is already held)
*/
- if (!dma_resv_trylock(obj->resv)) {
+ if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
*remaining += obj->size >> PAGE_SHIFT;
goto tail;
}
- if (shrink(obj)) {
+ if (shrink(obj, ticket)) {
freed += obj->size >> PAGE_SHIFT;
/*
@@ -1522,6 +1568,9 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
dma_resv_unlock(obj->resv);
+ if (ticket)
+ ww_acquire_fini(ticket);
+
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
@@ -1543,10 +1592,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
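A hedged sketch of a shrinker callback matching the new drm_gem_lru_scan() signature; the driver-side names are assumptions, and the object's reservation lock is already held when the callback runs:

/* Hypothetical shrinker glue for the updated drm_gem_lru_scan() interface. */
static bool example_shrink_cb(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	/* obj->resv is held by drm_gem_lru_scan(); the ticket may be NULL. */
	return drm_gem_evict_locked(obj) == 0;
}

static unsigned long example_scan(struct drm_gem_lru *lru, unsigned int nr_to_scan)
{
	struct ww_acquire_ctx ticket;
	unsigned long remaining = 0;

	/* drm_gem_lru_scan() initializes and finishes the ticket internally. */
	return drm_gem_lru_scan(lru, nr_to_scan, &remaining, example_shrink_cb, &ticket);
}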
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1607,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 93337543aac3..569d41a65a0b 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -2,6 +2,7 @@
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
@@ -309,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
- drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ if (shadow_plane_state) {
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ } else {
+ __drm_atomic_helper_plane_reset(plane, NULL);
+ }
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
@@ -333,8 +338,6 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane)
}
shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
- if (!shadow_plane_state)
- return;
__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 16988d316a6d..12d8307997a0 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -20,7 +20,9 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
/**
@@ -228,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
struct drm_gem_object *gem_obj = &dma_obj->base;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
- if (gem_obj->import_attach) {
+ if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
@@ -304,9 +306,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct drm_gem_dma_object *dma_obj;
+ int ret;
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, SZ_8, 0);
+ if (ret)
+ return ret;
dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
@@ -582,7 +586,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
+ drm_err(dev, "Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 185534f56bab..9fd4eb02a20f 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -15,6 +16,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include "drm_internal.h"
@@ -67,6 +69,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
static int
drm_gem_fb_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
@@ -74,7 +77,7 @@ drm_gem_fb_init(struct drm_device *dev,
unsigned int i;
int ret;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
@@ -135,6 +138,7 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* @dev: DRM device
* @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -151,20 +155,14 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];
unsigned int i;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- drm_dbg_kms(dev, "Failed to get FB format info\n");
- return -EINVAL;
- }
-
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -199,7 +197,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
}
}
- ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs);
if (ret)
goto err_gem_object_put;
@@ -220,6 +218,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
* callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -232,6 +231,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
@@ -242,7 +242,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -262,6 +262,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -281,9 +282,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
@@ -299,6 +301,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -319,9 +322,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
*/
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs_dirtyfb);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
@@ -362,7 +366,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +388,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +415,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
@@ -429,7 +433,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
if (!obj)
continue;
import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
@@ -466,7 +470,7 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
goto err___drm_gem_fb_end_cpu_access;
}
import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
@@ -501,12 +505,9 @@ EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
// TODO Drop this function and replace by drm_format_info_bpp() once all
// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
-
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -520,6 +521,7 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
@@ -560,7 +562,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
afbc_fb->offset = mode_cmd->offsets[0];
- bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd);
if (!bpp) {
drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
return -EINVAL;
@@ -582,6 +584,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
*
* @dev: DRM device
* @afbc_fb: afbc-specific framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @afbc_fb: afbc framebuffer
*
@@ -595,24 +598,24 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
* Zero on success or a negative error value on failure.
*/
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
- const struct drm_format_info *info;
struct drm_gem_object **objs;
int ret;
objs = afbc_fb->base.obj;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return -EINVAL;
- ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb);
if (ret < 0)
return ret;
- if (objs[0]->size < afbc_fb->afbc_size)
+ if (objs[0]->size < afbc_fb->afbc_size) {
+ drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum afbc size (%u)\n",
+ objs[0]->size, afbc_fb->afbc_size);
return -EINVAL;
+ }
return 0;
}
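
For context, a minimal sketch of how a driver's &drm_mode_config_funcs.fb_create callback looks with the reworked signatures above; the "foo" identifiers are hypothetical and not part of this patch:

/* Hypothetical driver code; assumes only the updated helper signatures above. */
static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
	      const struct drm_format_info *info,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* The core has already resolved @info; forward it to the helper. */
	return drm_gem_fb_create_with_dirty(dev, file, info, mode_cmd);
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create	= foo_fb_create,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};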
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..dc94a27710e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -18,6 +18,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
@@ -48,28 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vm_ops = &drm_gem_shmem_vm_ops,
};
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
- struct vfsmount *gemfs)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+ size_t size, bool private, struct vfsmount *gemfs)
{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- size = PAGE_ALIGN(size);
-
- if (dev->driver->gem_create_object) {
- obj = dev->driver->gem_create_object(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- shmem = to_drm_gem_shmem_obj(obj);
- } else {
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return ERR_PTR(-ENOMEM);
- obj = &shmem->base;
- }
-
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -81,7 +66,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
}
if (ret) {
drm_gem_private_object_fini(obj);
- goto err_free;
+ return ret;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -102,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
- return shmem;
-
+ return 0;
err_release:
drm_gem_object_release(obj);
-err_free:
- kfree(obj);
+ return ret;
+}
- return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @shmem: The allocated shmem GEM object.
+ * @size: Size of the object, in bytes.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+ return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object) {
+ obj = dev->driver->gem_create_object(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
+
+ ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return shmem;
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
@@ -150,22 +176,22 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
/**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
*
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
*/
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,33 +200,46 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
drm_gem_object_release(obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+ drm_gem_shmem_release(shmem);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +255,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -255,9 +292,14 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
- drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+ drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
+
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
@@ -267,7 +309,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -286,7 +329,10 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
int ret;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
@@ -296,7 +342,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -309,16 +355,19 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +376,45 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (obj->import_attach) {
+ dma_resv_assert_held(obj->resv);
+
+ if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -378,50 +425,46 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
return 0;
err_put_pages:
- if (!obj->import_attach)
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ if (!drm_gem_is_imported(obj))
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ dma_resv_assert_held(obj->resv);
+
+ if (drm_gem_is_imported(obj)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +492,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +503,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +519,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +535,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -514,18 +557,11 @@ EXPORT_SYMBOL(drm_gem_shmem_purge);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- if (!args->pitch || !args->size) {
- args->pitch = min_pitch;
- args->size = PAGE_ALIGN(args->pitch * args->height);
- } else {
- /* ensure sane minimum values */
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
- if (args->size < args->pitch * args->height)
- args->size = PAGE_ALIGN(args->pitch * args->height);
- }
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
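
For illustration, the open-coded sizing that drm_mode_size_dumb() replaces here computed pitch = DIV_ROUND_UP(width * bpp, 8) and size = PAGE_ALIGN(pitch * height). For a 1920x1080 dumb buffer at 32 bpp that is pitch = 7680 bytes and size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens to be page-aligned already. The shared helper centralizes this sizing (presumably with added overflow checking); the exact meaning of the SZ_8 and 0 arguments is defined by the new drm_mode_size_dumb() interface.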
@@ -566,7 +602,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
dma_resv_lock(shmem->base.resv, NULL);
@@ -575,8 +611,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +625,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -618,7 +654,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
struct drm_gem_object *obj = &shmem->base;
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
* doesn't set those fields.
@@ -639,7 +675,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -663,14 +699,15 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
- if (shmem->base.import_attach)
+ if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -690,7 +727,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -705,9 +742,9 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +766,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
@@ -799,6 +836,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import a dmabuf without
+ * mapping its sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
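
A minimal sketch of wiring the new import helper into a driver that otherwise uses the shmem helpers; the "foo" identifiers (including foo_fops) are hypothetical:

/* Hypothetical driver code; feature flags and fops are illustrative only. */
static const struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &foo_fops,
	.dumb_create		= drm_gem_shmem_dumb_create,
	/* Import dma-bufs without mapping their sg_table up front. */
	.gem_prime_import	= drm_gem_shmem_prime_import_no_map,
};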
+
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b..08ff0fadd0b2 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 22b1fe9c03b8..5e5b70518dbe 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -16,6 +17,7 @@
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>
@@ -88,11 +90,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
- * For drawing or scanout operations, rsp. buffer objects have to be pinned
- * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
@@ -111,7 +108,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
- /* We got here via ttm_bo_put(), which means that the
+ /* We got here via ttm_bo_fini(), which means that the
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
@@ -238,11 +235,11 @@ EXPORT_SYMBOL(drm_gem_vram_create);
* drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
* @gbo: the GEM VRAM object
*
- * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information.
*/
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
- ttm_bo_put(&gbo->bo);
+ ttm_bo_fini(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
@@ -299,30 +296,7 @@ out:
return 0;
}
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Small buffer objects, such as cursor images, can lead to memory
- * fragmentation if they are pinned in the middle of video RAM. This
- * is especially a problem on devices with only a small amount of
- * video RAM. Fragmentation can prevent the primary framebuffer from
- * fitting in, even though there's enough memory overall. The modifier
- * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
- * at the high end of the memory region to avoid fragmentation.
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
@@ -334,7 +308,6 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
@@ -343,15 +316,7 @@ static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
ttm_bo_unpin(&gbo->bo);
}
-/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
- * @gbo: the GEM VRAM object
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+static int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
@@ -364,7 +329,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vram_unpin);
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
@@ -690,41 +654,6 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
- * @gem: The GEM object to pin
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- /*
- * Fbdev console emulation is the use case of these PRIME
- * helpers. This may involve updating a hardware buffer from
- * a shadow FB. We pin the buffer to it's current location
- * (either video RAM or system memory) to prevent it from
- * being relocated during the update operation. If you require
- * the buffer to be pinned to VRAM, implement a callback that
- * sets the flags accordingly.
- */
- return drm_gem_vram_pin_locked(gbo, 0);
-}
-
-/**
- * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
- * @gem: The GEM object to unpin
- */
-static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- drm_gem_vram_unpin_locked(gbo);
-}
-
-/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
@@ -762,8 +691,6 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
- .pin = drm_gem_vram_object_pin,
- .unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
@@ -933,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
- false, true);
+ TTM_ALLOCATION_POOL_USE_DMA32);
if (ret)
return ret;
@@ -1041,7 +968,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev,
max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
new file mode 100644
index 000000000000..73e550c8ff8c
--- /dev/null
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -0,0 +1,1635 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Authors:
+ * Matthew Brost <matthew.brost@intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/hmm.h>
+#include <linux/hugetlb_inline.h>
+#include <linux/memremap.h>
+#include <linux/mm_types.h>
+#include <linux/slab.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: Overview
+ *
+ * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
+ * is a component of the DRM framework designed to manage shared virtual memory
+ * between the CPU and GPU. It enables efficient data exchange and processing
+ * for GPU-accelerated applications by allowing memory sharing and
+ * synchronization between the CPU's and GPU's virtual address spaces.
+ *
+ * Key GPU SVM Components:
+ *
+ * - Notifiers:
+ * Used for tracking memory intervals and notifying the GPU of changes,
+ * notifiers are sized based on a GPU SVM initialization parameter, with a
+ * recommendation of 512M or larger. They maintain a Red-Black tree and a
+ * list of ranges that fall within the notifier interval. Notifiers are
+ * tracked within a GPU SVM Red-Black tree and list and are dynamically
+ * inserted or removed as ranges within the interval are created or
+ * destroyed.
+ * - Ranges:
+ * Represent memory ranges mapped in a DRM device and managed by GPU SVM.
+ * They are sized based on an array of chunk sizes, which is a GPU SVM
+ * initialization parameter, and the CPU address space. Upon GPU fault,
+ * the largest aligned chunk that fits within the faulting CPU address
+ * space is chosen for the range size. Ranges are expected to be
+ * dynamically allocated on GPU fault and removed on an MMU notifier UNMAP
+ * event. As mentioned above, ranges are tracked in a notifier's Red-Black
+ * tree.
+ *
+ * - Operations:
+ * Define the interface for driver-specific GPU SVM operations such as
+ * range allocation, notifier allocation, and invalidations.
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for GPU SVM to migrate
+ * to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations such
+ * as releasing memory, populating pfns, and copying to / from device memory.
+ *
+ * This layer provides interfaces for allocating, mapping, migrating, and
+ * releasing memory ranges between the CPU and GPU. It handles all core memory
+ * management interactions (DMA mapping, HMM, and migration) and provides
+ * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
+ * to build the expected driver components for an SVM implementation as detailed
+ * below.
+ *
+ * Expected Driver Components:
+ *
+ * - GPU page fault handler:
+ * Used to create ranges and notifiers based on the fault address,
+ * optionally migrate the range to device memory, and create GPU bindings.
+ *
+ * - Garbage collector:
+ * Used to unmap and destroy GPU bindings for ranges. Ranges are expected
+ * to be added to the garbage collector upon an MMU_NOTIFY_UNMAP event in
+ * the notifier callback.
+ *
+ * - Notifier callback:
+ * Used to invalidate and DMA unmap GPU bindings for ranges.
+ */
+
+/**
+ * DOC: Locking
+ *
+ * GPU SVM handles locking for core MM interactions, i.e., it locks/unlocks the
+ * mmap lock as needed.
+ *
+ * GPU SVM introduces a global notifier lock, which safeguards the notifier's
+ * range RB tree and list, as well as the range's DMA mappings and sequence
+ * number. GPU SVM manages all necessary locking and unlocking operations,
+ * except for rechecking that a range's pages are valid
+ * (drm_gpusvm_range_pages_valid) when the driver is committing GPU bindings.
+ * This lock corresponds to the ``driver->update`` lock mentioned in
+ * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM
+ * global lock to a per-notifier lock if finer-grained locking is deemed
+ * necessary.
+ *
+ * In addition to the locking mentioned above, the driver should implement a
+ * lock to safeguard core GPU SVM function calls that modify state, such as
+ * drm_gpusvm_range_find_or_insert and drm_gpusvm_range_remove. This lock is
+ * denoted as 'driver_svm_lock' in code examples. Finer-grained driver-side
+ * locking should also be possible for concurrent GPU fault processing within a
+ * single GPU SVM. The 'driver_svm_lock' can be passed via drm_gpusvm_driver_set_lock
+ * to add annotations to GPU SVM.
+ */
+
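
A minimal sketch (hypothetical driver code) of annotating the driver-side lock described above, assuming the drm_gpusvm_driver_set_lock() helper referenced here takes the GPU SVM and the lock guarding the state-modifying calls:

/* Hypothetical driver init; assumes drm_gpusvm_driver_set_lock(gpusvm, lock). */
static int foo_svm_init(struct foo_vm *foo)
{
	mutex_init(&foo->driver_svm_lock);
	/* Lets GPU SVM assert the driver lock is held where required. */
	drm_gpusvm_driver_set_lock(&foo->svm, &foo->driver_svm_lock);
	return 0;
}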
+/**
+ * DOC: Partial Unmapping of Ranges
+ *
+ * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
+ * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one
+ * being that a subset of the range still has CPU and GPU mappings. If the
+ * backing store for the range is in device memory, a subset of the backing
+ * store has references. One option would be to split the range and device
+ * memory backing store, but the implementation for this would be quite
+ * complicated. Given that partial unmappings are rare and driver-defined range
+ * sizes are relatively small, GPU SVM does not support splitting of ranges.
+ *
+ * With no support for range splitting, upon partial unmapping of a range, the
+ * driver is expected to invalidate and destroy the entire range. If the range
+ * has device memory as its backing, the driver is also expected to migrate any
+ * remaining pages back to RAM.
+ */
+
+/**
+ * DOC: Examples
+ *
+ * This section provides three examples of how to build the expected driver
+ * components: the GPU page fault handler, the garbage collector, and the
+ * notifier callback.
+ *
+ * The generic code provided does not include logic for complex migration
+ * policies, optimized invalidations, fine-grained driver locking, or other
+ * potentially required driver locking (e.g., DMA-resv locks).
+ *
+ * 1) GPU page fault handler
+ *
+ * .. code-block:: c
+ *
+ * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
+ * {
+ * int err = 0;
+ *
+ * driver_alloc_and_setup_memory_for_bind(gpusvm, range);
+ *
+ * drm_gpusvm_notifier_lock(gpusvm);
+ * if (drm_gpusvm_range_pages_valid(range))
+ * driver_commit_bind(gpusvm, range);
+ * else
+ * err = -EAGAIN;
+ * drm_gpusvm_notifier_unlock(gpusvm);
+ *
+ * return err;
+ * }
+ *
+ * int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
+ * unsigned long gpuva_start, unsigned long gpuva_end)
+ * {
+ * struct drm_gpusvm_ctx ctx = {};
+ * int err;
+ *
+ * driver_svm_lock();
+ * retry:
+ * // Always process UNMAPs first so view of GPU SVM ranges is current
+ * driver_garbage_collector(gpusvm);
+ *
+ * range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
+ * gpuva_start, gpuva_end,
+ * &ctx);
+ * if (IS_ERR(range)) {
+ * err = PTR_ERR(range);
+ * goto unlock;
+ * }
+ *
+ * if (driver_migration_policy(range)) {
+ * err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
+ * gpuva_start, gpuva_end, gpusvm->mm,
+ * ctx.timeslice_ms);
+ * if (err) // CPU mappings may have changed
+ * goto retry;
+ * }
+ *
+ * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
+ * if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { // CPU mappings changed
+ * if (err == -EOPNOTSUPP)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ * goto retry;
+ * } else if (err) {
+ * goto unlock;
+ * }
+ *
+ * err = driver_bind_range(gpusvm, range);
+ * if (err == -EAGAIN) // CPU mappings changed
+ * goto retry;
+ *
+ * unlock:
+ * driver_svm_unlock();
+ * return err;
+ * }
+ *
+ * 2) Garbage Collector
+ *
+ * .. code-block:: c
+ *
+ * void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_range *range)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * // Partial unmap, migrate any remaining device memory pages back to RAM
+ * if (range->flags.partial_unmap)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ *
+ * driver_unbind_range(range);
+ * drm_gpusvm_range_remove(gpusvm, range);
+ * }
+ *
+ * void driver_garbage_collector(struct drm_gpusvm *gpusvm)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * for_each_range_in_garbage_collector(gpusvm, range)
+ * __driver_garbage_collector(gpusvm, range);
+ * }
+ *
+ * 3) Notifier callback
+ *
+ * .. code-block:: c
+ *
+ * void driver_invalidation(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_notifier *notifier,
+ * const struct mmu_notifier_range *mmu_range)
+ * {
+ * struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+ * struct drm_gpusvm_range *range = NULL;
+ *
+ * driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
+ *
+ * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
+ * mmu_range->end) {
+ * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
+ *
+ * if (mmu_range->event != MMU_NOTIFY_UNMAP)
+ * continue;
+ *
+ * drm_gpusvm_range_set_unmapped(range, mmu_range);
+ * driver_garbage_collector_add(gpusvm, range);
+ * }
+ * }
+ */
+
+/**
+ * npages_in_range() - Calculate the number of pages in a given range
+ * @start: The start address of the range
+ * @end: The end address of the range
+ *
+ * This function calculates the number of pages in a given memory range,
+ * specified by the start and end addresses. It divides the difference
+ * between the end and start addresses by the page size (PAGE_SIZE) to
+ * determine the number of pages in the range.
+ *
+ * Return: The number of pages in the specified range.
+ */
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address of the notifier
+ * @end: End address of the notifier
+ *
+ * Return: A pointer to the drm_gpusvm_notifier if found or NULL
+ */
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
+
+/**
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
+ *
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_range, itree);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+
+/**
+ * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
+ * @mni: Pointer to the mmu_interval_notifier structure.
+ * @mmu_range: Pointer to the mmu_notifier_range structure.
+ * @cur_seq: Current sequence number.
+ *
+ * This function serves as a generic MMU notifier for GPU SVM. It sets the MMU
+ * notifier sequence number and calls the driver invalidate vfunc under
+ * gpusvm->notifier_lock.
+ *
+ * Return: true if the operation succeeds, false otherwise.
+ */
+static bool
+drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *mmu_range,
+ unsigned long cur_seq)
+{
+ struct drm_gpusvm_notifier *notifier =
+ container_of(mni, typeof(*notifier), notifier);
+ struct drm_gpusvm *gpusvm = notifier->gpusvm;
+
+ if (!mmu_notifier_range_blockable(mmu_range))
+ return false;
+
+ down_write(&gpusvm->notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+ gpusvm->ops->invalidate(gpusvm, notifier, mmu_range);
+ up_write(&gpusvm->notifier_lock);
+
+ return true;
+}
+
+/*
+ * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
+ */
+static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
+ .invalidate = drm_gpusvm_notifier_invalidate,
+};
+
+/**
+ * drm_gpusvm_init() - Initialize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @name: Name of the GPU SVM.
+ * @drm: Pointer to the DRM device structure.
+ * @mm: Pointer to the mm_struct for the address space.
+ * @mm_start: Start address of GPU SVM.
+ * @mm_range: Range of the GPU SVM.
+ * @notifier_size: Size of individual notifiers.
+ * @ops: Pointer to the operations structure for GPU SVM.
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order with last
+ * entry being SZ_4K.
+ * @num_chunks: Number of chunks.
+ *
+ * This function initializes the GPU SVM.
+ *
+ * Note: If only using the simple drm_gpusvm_pages API (get/unmap/free),
+ * then only @gpusvm, @name, and @drm are expected. However, the same base
+ * @gpusvm can also be used with both modes together in which case the full
+ * setup is needed, where the core drm_gpusvm_pages API will simply never use
+ * the other fields.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks)
+{
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size)
+ return -EINVAL;
+ }
+
+ gpusvm->name = name;
+ gpusvm->drm = drm;
+ gpusvm->mm = mm;
+ gpusvm->mm_start = mm_start;
+ gpusvm->mm_range = mm_range;
+ gpusvm->notifier_size = notifier_size;
+ gpusvm->ops = ops;
+ gpusvm->chunk_sizes = chunk_sizes;
+ gpusvm->num_chunks = num_chunks;
+
+ gpusvm->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&gpusvm->notifier_list);
+
+ init_rwsem(&gpusvm->notifier_lock);
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&gpusvm->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+#ifdef CONFIG_LOCKDEP
+ gpusvm->lock_dep_map = NULL;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_init);
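
A minimal sketch (hypothetical driver code) of the two initialization modes described in the note above; the notifier size, address range, and chunk array shown are illustrative assumptions, not requirements:

/* Hypothetical driver code; foo_gpusvm_ops must provide at least .invalidate. */
static const unsigned long foo_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };

/* Full SVM mode: mirror the faulting process' address space. */
err = drm_gpusvm_init(&foo->svm, "foo-svm", drm, current->mm,
		      0, TASK_SIZE, SZ_512M, &foo_gpusvm_ops,
		      foo_chunk_sizes, ARRAY_SIZE(foo_chunk_sizes));

/* Pages-only mode: only the drm_gpusvm_pages API, no notifiers or ranges. */
err = drm_gpusvm_init(&foo->svm_pages, "foo-pages", drm, NULL,
		      0, 0, 0, NULL, NULL, 0);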
+
+/**
+ * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_notifier structure.
+ */
+static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_notifier, itree.rb);
+}
+
+/**
+ * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function inserts the GPU SVM notifier into the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_insert(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ interval_tree_insert(&notifier->itree, &gpusvm->root);
+
+ node = rb_prev(&notifier->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_notifier(node))->entry;
+ else
+ head = &gpusvm->notifier_list;
+
+ list_add(&notifier->entry, head);
+}
+
+/**
+ * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function removes the GPU SVM notifier from the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ interval_tree_remove(&notifier->itree, &gpusvm->root);
+ list_del(&notifier->entry);
+}
+
+/**
+ * drm_gpusvm_fini() - Finalize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * This function finalizes the GPU SVM by cleaning up any remaining ranges and
+ * notifiers, and dropping a reference to struct MM.
+ */
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, gpusvm, 0, LONG_MAX) {
+ struct drm_gpusvm_range *range, *__next;
+
+ /*
+ * Remove notifier first to avoid racing with any invalidation
+ */
+ mmu_interval_notifier_remove(&notifier->notifier);
+ notifier->flags.removed = true;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
+ LONG_MAX)
+ drm_gpusvm_range_remove(gpusvm, range);
+ }
+
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
+ WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
+
+/**
+ * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ *
+ * This function allocates and initializes the GPU SVM notifier structure.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_alloc(struct drm_gpusvm *gpusvm, unsigned long fault_addr)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ if (gpusvm->ops->notifier_alloc)
+ notifier = gpusvm->ops->notifier_alloc();
+ else
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+
+ if (!notifier)
+ return ERR_PTR(-ENOMEM);
+
+ notifier->gpusvm = gpusvm;
+ notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size);
+ notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1;
+ INIT_LIST_HEAD(&notifier->entry);
+ notifier->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&notifier->range_list);
+
+ return notifier;
+}
+
+/**
+ * drm_gpusvm_notifier_free() - Free GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function frees the GPU SVM notifier structure.
+ */
+static void drm_gpusvm_notifier_free(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root));
+
+ if (gpusvm->ops->notifier_free)
+ gpusvm->ops->notifier_free(notifier);
+ else
+ kfree(notifier);
+}
+
+/**
+ * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_range struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_range structure.
+ */
+static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_range, itree.rb);
+}
+
+/**
+ * drm_gpusvm_range_insert() - Insert GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function inserts the GPU SVM range into the notifier RB tree and list.
+ */
+static void drm_gpusvm_range_insert(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ drm_gpusvm_notifier_lock(notifier->gpusvm);
+ interval_tree_insert(&range->itree, &notifier->root);
+
+ node = rb_prev(&range->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_range(node))->entry;
+ else
+ head = &notifier->range_list;
+
+ list_add(&range->entry, head);
+ drm_gpusvm_notifier_unlock(notifier->gpusvm);
+}
+
+/**
+ * __drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function removes the GPU SVM range from the notifier RB tree and list.
+ */
+static void __drm_gpusvm_range_remove(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ interval_tree_remove(&range->itree, &notifier->root);
+ list_del(&range->entry);
+}
+
+/**
+ * drm_gpusvm_range_alloc() - Allocate GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @fault_addr: Fault address
+ * @chunk_size: Chunk size
+ * @migrate_devmem: Flag indicating whether to migrate device memory
+ *
+ * This function allocates and initializes the GPU SVM range structure.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_range *
+drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long fault_addr, unsigned long chunk_size,
+ bool migrate_devmem)
+{
+ struct drm_gpusvm_range *range;
+
+ if (gpusvm->ops->range_alloc)
+ range = gpusvm->ops->range_alloc(gpusvm);
+ else
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+
+ if (!range)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&range->refcount);
+ range->gpusvm = gpusvm;
+ range->notifier = notifier;
+ range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
+ range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
+ INIT_LIST_HEAD(&range->entry);
+ range->pages.notifier_seq = LONG_MAX;
+ range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
+
+ return range;
+}
+
+/**
+ * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order.
+ * @hmm_pfn: The current hmm_pfn.
+ * @hmm_pfn_index: Index of the @hmm_pfn within the pfn array.
+ * @npages: Number of pages within the pfn array i.e the hmm range size.
+ *
+ * To allow skipping PFNs with the same flags (like when they belong to
+ * the same huge PTE) when looping over the pfn array, take a given hmm_pfn,
+ * and return the largest order that will fit inside the CPU PTE, but also
+ * crucially accounting for the original hmm range boundaries.
+ *
+ * Return: The largest order that will safely fit within the size of the hmm_pfn
+ * CPU PTE.
+ */
+static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
+ unsigned long hmm_pfn_index,
+ unsigned long npages)
+{
+ unsigned long size;
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1);
+ hmm_pfn_index += size;
+ if (hmm_pfn_index > npages)
+ size -= (hmm_pfn_index - npages);
+
+ return ilog2(size);
+}
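+
+/*
+ * Editor's worked example (illustrative values, not from this patch): with a
+ * 2 MiB huge PTE (map order 9, i.e. 512 pages), an hmm_pfn that sits 16 pages
+ * into that huge page, and only 100 entries remaining in the pfn array from
+ * this index, the computation above gives size = 512 - 16 = 496, clamps it to
+ * the 100 remaining entries, and returns ilog2(100) = 6, i.e. an order
+ * covering 64 pages.
+ */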
+
+/**
+ * drm_gpusvm_check_pages() - Check pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @start: Start address
+ * @end: End address
+ * @dev_private_owner: The device private page owner
+ *
+ * Check if pages between start and end have been faulted in on the CPU. Used to
+ * prevent migration of pages without a CPU backing store.
+ *
+ * Return: True if the pages have been faulted in on the CPU, false otherwise
+ */
+static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long start, unsigned long end,
+ void *dev_private_owner)
+{
+ struct hmm_range hmm_range = {
+ .default_flags = 0,
+ .notifier = &notifier->notifier,
+ .start = start,
+ .end = end,
+ .dev_private_owner = dev_private_owner,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(start, end);
+ int err, i;
+
+ mmap_assert_locked(gpusvm->mm);
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return false;
+
+ hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier);
+ hmm_range.hmm_pfns = pfns;
+
+ while (true) {
+ err = hmm_range_fault(&hmm_range);
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(&notifier->notifier);
+ continue;
+ }
+ break;
+ }
+ if (err)
+ goto err_free;
+
+ for (i = 0; i < npages;) {
+ if (!(pfns[i] & HMM_PFN_VALID)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+ i += 0x1 << drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
+ }
+
+err_free:
+ kvfree(pfns);
+ return err ? false : true;
+}
+
+/**
+ * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @vas: Pointer to the virtual memory area structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @check_pages_threshold: Check CPU pages for present threshold
+ * @dev_private_owner: The device private page owner
+ *
+ * This function determines the chunk size for the GPU SVM range based on the
+ * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
+ * memory area boundaries.
+ *
+ * Return: Chunk size on success, LONG_MAX on failure.
+ */
+static unsigned long
+drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ struct vm_area_struct *vas,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ unsigned long check_pages_threshold,
+ void *dev_private_owner)
+{
+ unsigned long start, end;
+ int i = 0;
+
+retry:
+ for (; i < gpusvm->num_chunks; ++i) {
+ start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]);
+ end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]);
+
+ if (start >= vas->vm_start && end <= vas->vm_end &&
+ start >= drm_gpusvm_notifier_start(notifier) &&
+ end <= drm_gpusvm_notifier_end(notifier) &&
+ start >= gpuva_start && end <= gpuva_end)
+ break;
+ }
+
+ if (i == gpusvm->num_chunks)
+ return LONG_MAX;
+
+ /*
+	 * If allocating more than a single page, ensure the new range does
+	 * not overlap with existing ranges.
+ */
+ if (end - start != SZ_4K) {
+ struct drm_gpusvm_range *range;
+
+ range = drm_gpusvm_range_find(notifier, start, end);
+ if (range) {
+ ++i;
+ goto retry;
+ }
+
+ /*
+ * XXX: Only create range on pages CPU has faulted in. Without
+ * this check, or prefault, on BMG 'xe_exec_system_allocator --r
+ * process-many-malloc' fails. In the failure case, each process
+ * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
+ * ranges. When migrating the SVM ranges, some processes fail in
+ * drm_pagemap_migrate_to_devmem with 'migrate.cpages != npages'
+ * and then upon drm_gpusvm_range_get_pages device pages from
+ * other processes are collected + faulted in which creates all
+	 * sorts of problems. It is unclear exactly how this happens; the
+	 * problem also goes away if 'xe_exec_system_allocator --r
+	 * process-many-malloc' mallocs at least 64k at a time.
+ */
+ if (end - start <= check_pages_threshold &&
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end, dev_private_owner)) {
+ ++i;
+ goto retry;
+ }
+ }
+
+ return end - start;
+}
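+
+/*
+ * Example: with chunk_sizes = { SZ_2M, SZ_64K, SZ_4K } (assuming the driver
+ * orders chunks from largest to smallest, which the retry logic above relies
+ * on), a fault whose 2M-aligned window would cross the VMA, notifier or GPUVA
+ * boundary falls back to 64K; if the 64K window already overlaps an existing
+ * range, or it is within check_pages_threshold and its CPU pages are not all
+ * faulted in, it falls back again to a single 4K page.
+ */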
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * Ensure driver lock is held.
+ */
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+ if ((gpusvm)->lock_dep_map)
+ lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0));
+}
+#else
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+}
+#endif
+
+/**
+ * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @start: The inclusive start user address.
+ * @end: The exclusive end user address.
+ *
+ * Returns: The start address of the first VMA within the provided range,
+ * ULONG_MAX otherwise. Assumes @start < @end.
+ */
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = ULONG_MAX;
+
+ if (!mmget_not_zero(mm))
+ return addr;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma_intersection(mm, start, end);
+ if (vma)
+ addr = vma->vm_start;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start);
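+
+/*
+ * Example (sketch): a driver prefetch path can use this helper to skip over
+ * unmapped holes in a user-supplied range before creating ranges for it:
+ *
+ *     addr = drm_gpusvm_find_vma_start(gpusvm, start, end);
+ *     if (addr == ULONG_MAX)
+ *             return -ENOENT;
+ *
+ * where ULONG_MAX indicates that no VMA intersects [start, end).
+ */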
+
+/**
+ * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds or inserts a newly allocated GPU SVM range based on the
+ * fault address. The caller must hold a lock to protect range lookup and insertion.
+ *
+ * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct drm_gpusvm_range *range;
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vas;
+ bool notifier_alloc = false;
+ unsigned long chunk_size;
+ int err;
+ bool migrate_devmem;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ if (fault_addr < gpusvm->mm_start ||
+ fault_addr > gpusvm->mm_start + gpusvm->mm_range)
+ return ERR_PTR(-EINVAL);
+
+ if (!mmget_not_zero(mm))
+ return ERR_PTR(-EFAULT);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
+ if (!notifier) {
+ notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
+ if (IS_ERR(notifier)) {
+ err = PTR_ERR(notifier);
+ goto err_mmunlock;
+ }
+ notifier_alloc = true;
+ err = mmu_interval_notifier_insert(&notifier->notifier,
+ mm,
+ drm_gpusvm_notifier_start(notifier),
+ drm_gpusvm_notifier_size(notifier),
+ &drm_gpusvm_notifier_ops);
+ if (err)
+ goto err_notifier;
+ }
+
+ mmap_read_lock(mm);
+
+ vas = vma_lookup(mm, fault_addr);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_notifier_remove;
+ }
+
+ if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) {
+ err = -EPERM;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
+ if (range)
+ goto out_mmunlock;
+ /*
+ * XXX: Short-circuiting migration based on migrate_vma_* current
+ * limitations. If/when migrate_vma_* add more support, this logic will
+ * have to change.
+ */
+ migrate_devmem = ctx->devmem_possible &&
+ vma_is_anonymous(vas) && !is_vm_hugetlb_page(vas);
+
+ chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
+ fault_addr, gpuva_start,
+ gpuva_end,
+ ctx->check_pages_threshold,
+ ctx->device_private_page_owner);
+ if (chunk_size == LONG_MAX) {
+ err = -EINVAL;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
+ migrate_devmem);
+ if (IS_ERR(range)) {
+ err = PTR_ERR(range);
+ goto err_notifier_remove;
+ }
+
+ drm_gpusvm_range_insert(notifier, range);
+ if (notifier_alloc)
+ drm_gpusvm_notifier_insert(gpusvm, notifier);
+
+out_mmunlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return range;
+
+err_notifier_remove:
+ mmap_read_unlock(mm);
+ if (notifier_alloc)
+ mmu_interval_notifier_remove(&notifier->notifier);
+err_notifier:
+ if (notifier_alloc)
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+err_mmunlock:
+ mmput(mm);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
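+
+/*
+ * Minimal usage sketch of the range API from a GPU page fault handler. The
+ * driver_svm_lock()/driver_svm_unlock() and driver_bind_range() helpers are
+ * hypothetical placeholders for the driver-side lock protecting range
+ * lookup/insertion and for the actual GPU page table update:
+ *
+ *     struct drm_gpusvm_ctx ctx = { .devmem_possible = true, };
+ *     struct drm_gpusvm_range *range;
+ *     int err;
+ *
+ *     driver_svm_lock();
+ *     range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
+ *                                             gpuva_start, gpuva_end, &ctx);
+ *     if (IS_ERR(range)) {
+ *             driver_svm_unlock();
+ *             return PTR_ERR(range);
+ *     }
+ *
+ *     err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
+ *     if (!err) {
+ *             drm_gpusvm_notifier_lock(gpusvm);
+ *             if (drm_gpusvm_range_pages_valid(gpusvm, range))
+ *                     err = driver_bind_range(range);
+ *             else
+ *                     err = -EAGAIN;
+ *             drm_gpusvm_notifier_unlock(gpusvm);
+ *     }
+ *     driver_svm_unlock();
+ *     return err;
+ */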
+
+/**
+ * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal)
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of pages to unmap
+ *
+ * This function unmaps pages associated with a GPU SVM pages struct. It assumes
+ * and asserts that correct locking is in place when called.
+ */
+static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ struct drm_pagemap *dpagemap = svm_pages->dpagemap;
+ struct device *dev = gpusvm->drm->dev;
+ unsigned long i, j;
+
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (svm_pages->flags.has_dma_mapping) {
+ struct drm_gpusvm_pages_flags flags = {
+ .__flags = svm_pages->flags.__flags,
+ };
+
+ for (i = 0, j = 0; i < npages; j++) {
+ struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
+
+ if (addr->proto == DRM_INTERCONNECT_SYSTEM)
+ dma_unmap_page(dev,
+ addr->addr,
+ PAGE_SIZE << addr->order,
+ addr->dir);
+ else if (dpagemap && dpagemap->ops->device_unmap)
+ dpagemap->ops->device_unmap(dpagemap,
+ dev, *addr);
+ i += 1 << addr->order;
+ }
+
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ flags.has_devmem_pages = false;
+ flags.has_dma_mapping = false;
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
+
+ svm_pages->dpagemap = NULL;
+ }
+}
+
+/**
+ * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function frees the dma address array associated with a GPU SVM pages struct.
+ */
+static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (svm_pages->dma_addr) {
+ kvfree(svm_pages->dma_addr);
+ svm_pages->dma_addr = NULL;
+ }
+}
+
+/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
+/**
+ * drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be removed
+ *
+ * This function removes the specified GPU SVM range and also removes the parent
+ * GPU SVM notifier if no more ranges remain in the notifier. The caller must
+ * hold a lock to protect range and notifier removal.
+ */
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm,
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_start(range) + 1);
+ if (WARN_ON_ONCE(!notifier))
+ return;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, &range->pages);
+ __drm_gpusvm_range_remove(notifier, range);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ drm_gpusvm_range_put(range);
+
+ if (RB_EMPTY_ROOT(&notifier->root.rb_root)) {
+ if (!notifier->flags.removed)
+ mmu_interval_notifier_remove(&notifier->notifier);
+ drm_gpusvm_notifier_remove(gpusvm, notifier);
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_remove);
+
+/**
+ * drm_gpusvm_range_get() - Get a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function increments the reference count of the specified GPU SVM range.
+ *
+ * Return: Pointer to the GPU SVM range.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range)
+{
+ kref_get(&range->refcount);
+
+ return range;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get);
+
+/**
+ * drm_gpusvm_range_destroy() - Destroy GPU SVM range
+ * @refcount: Pointer to the reference counter embedded in the GPU SVM range
+ *
+ * This function destroys the specified GPU SVM range when its reference count
+ * reaches zero. If a custom range-free function is provided, it is invoked to
+ * free the range; otherwise, the range is deallocated using kfree().
+ */
+static void drm_gpusvm_range_destroy(struct kref *refcount)
+{
+ struct drm_gpusvm_range *range =
+ container_of(refcount, struct drm_gpusvm_range, refcount);
+ struct drm_gpusvm *gpusvm = range->gpusvm;
+
+ if (gpusvm->ops->range_free)
+ gpusvm->ops->range_free(range);
+ else
+ kfree(range);
+}
+
+/**
+ * drm_gpusvm_range_put() - Put a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function decrements the reference count of the specified GPU SVM range
+ * and frees it when the count reaches zero.
+ */
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
+{
+ kref_put(&range->refcount, drm_gpusvm_range_destroy);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
+
+/**
+ * drm_gpusvm_pages_valid() - GPU SVM pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function determines if the GPU SVM pages are valid. It is expected to be
+ * called holding gpusvm->notifier_lock and as the last step before committing a
+ * GPU binding. This is akin to a notifier seqno check in the HMM documentation,
+ * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
+ * function is required for finer-grained checking (i.e., per range) of whether
+ * pages are valid.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping;
+}
+
+/**
+ * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function determines if a GPU SVM range's pages are valid. It is expected
+ * to be called holding gpusvm->notifier_lock and as the last step before
+ * committing a GPU binding. This is akin to a notifier seqno check in the HMM
+ * documentation, but due to wider notifiers (i.e., notifiers which span multiple
+ * ranges) this function is required for finer-grained checking (i.e., per range)
+ * of whether pages are valid.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ return drm_gpusvm_pages_valid(gpusvm, &range->pages);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
+
+/**
+ * drm_gpusvm_pages_valid_unlocked() - GPU SVM pages valid unlocked
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function determines if the GPU SVM pages are valid. It is expected to be
+ * called without holding gpusvm->notifier_lock.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ bool pages_valid;
+
+ if (!svm_pages->dma_addr)
+ return false;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ pages_valid = drm_gpusvm_pages_valid(gpusvm, svm_pages);
+ if (!pages_valid)
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ return pages_valid;
+}
+
+/**
+ * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: The SVM pages to populate. This will contain the dma-addresses
+ * @mm: The mm corresponding to the CPU range
+ * @notifier: The corresponding notifier for the given CPU range
+ * @pages_start: Start CPU address for the pages
+ * @pages_end: End CPU address for the pages (exclusive)
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for the given CPU address range and ensures they are
+ * mapped for DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
+ HMM_PFN_REQ_WRITE),
+ .notifier = notifier,
+ .start = pages_start,
+ .end = pages_end,
+ .dev_private_owner = ctx->device_private_page_owner,
+ };
+ void *zdd;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i, j;
+ unsigned long npages = npages_in_range(pages_start, pages_end);
+ unsigned long num_dma_mapped;
+ unsigned int order = 0;
+ unsigned long *pfns;
+ int err = 0;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap *dpagemap;
+ struct drm_gpusvm_pages_flags flags;
+ enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL;
+
+retry:
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (drm_gpusvm_pages_valid_unlocked(gpusvm, svm_pages))
+ goto set_seqno;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ if (!mmget_not_zero(mm)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ hmm_range.hmm_pfns = pfns;
+ while (true) {
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(notifier);
+ continue;
+ }
+ break;
+ }
+ mmput(mm);
+ if (err)
+ goto err_free;
+
+map_pages:
+ /*
+ * Perform all dma mappings under the notifier lock to not
+ * access freed pages. A notifier will either block on
+ * the notifier lock or unmap dma.
+ */
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ flags.__flags = svm_pages->flags.__flags;
+ if (flags.unmapped) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+ goto retry;
+ }
+
+ if (!svm_pages->dma_addr) {
+ /* Unlock and restart mapping to allocate memory. */
+ drm_gpusvm_notifier_unlock(gpusvm);
+ svm_pages->dma_addr =
+ kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);
+ if (!svm_pages->dma_addr) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ goto map_pages;
+ }
+
+ zdd = NULL;
+ pagemap = NULL;
+ num_dma_mapped = 0;
+ for (i = 0, j = 0; i < npages; ++j) {
+ struct page *page = hmm_pfn_to_page(pfns[i]);
+
+ order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
+ if (is_device_private_page(page) ||
+ is_device_coherent_page(page)) {
+ if (!ctx->allow_mixed &&
+ zdd != page->zone_device_data && i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+ zdd = page->zone_device_data;
+ if (pagemap != page_pgmap(page)) {
+ if (i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ pagemap = page_pgmap(page);
+ dpagemap = drm_pagemap_page_to_dpagemap(page);
+ if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
+ /*
+ * Raced. This is not supposed to happen
+ * since hmm_range_fault() should've migrated
+ * this page to system.
+ */
+ err = -EAGAIN;
+ goto err_unmap;
+ }
+ }
+ svm_pages->dma_addr[j] =
+ dpagemap->ops->device_map(dpagemap,
+ gpusvm->drm->dev,
+ page, order,
+ dma_dir);
+ if (dma_mapping_error(gpusvm->drm->dev,
+ svm_pages->dma_addr[j].addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+ } else {
+ dma_addr_t addr;
+
+ if (is_zone_device_page(page) ||
+ (pagemap && !ctx->allow_mixed)) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ if (ctx->devmem_only) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ addr = dma_map_page(gpusvm->drm->dev,
+ page, 0,
+ PAGE_SIZE << order,
+ dma_dir);
+ if (dma_mapping_error(gpusvm->drm->dev, addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ svm_pages->dma_addr[j] = drm_pagemap_addr_encode
+ (addr, DRM_INTERCONNECT_SYSTEM, order,
+ dma_dir);
+ }
+ i += 1 << order;
+ num_dma_mapped = i;
+ flags.has_dma_mapping = true;
+ }
+
+ if (pagemap) {
+ flags.has_devmem_pages = true;
+ svm_pages->dpagemap = dpagemap;
+ }
+
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
+
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+set_seqno:
+ svm_pages->notifier_seq = hmm_range.notifier_seq;
+
+ return 0;
+
+err_unmap:
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, num_dma_mapped);
+ drm_gpusvm_notifier_unlock(gpusvm);
+err_free:
+ kvfree(pfns);
+ if (err == -EAGAIN)
+ goto retry;
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
+ &range->notifier->notifier,
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range), ctx);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
+
+/**
+ * drm_gpusvm_unmap_pages() - Unmap GPU svm pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of pages in @svm_pages.
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM pages struct. If
+ * ctx->in_notifier is set, it is assumed that gpusvm->notifier_lock is held in
+ * write mode; if it is clear, it acquires gpusvm->notifier_lock in read mode.
+ * Must be called in the invalidate() callback of the corresponding notifier for
+ * the IOMMU security model.
+ */
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
+
+/**
+ * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM range. If
+ * ctx->in_notifier is set, it is assumed that gpusvm->notifier_lock is held in
+ * write mode; if it is clear, it acquires gpusvm->notifier_lock in read mode.
+ * Must be called on each GPU SVM range attached to the notifier in
+ * gpusvm->ops->invalidate for the IOMMU security model.
+ */
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+
+ return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
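+
+/*
+ * Sketch of a driver invalidate callback built on the helpers above. The
+ * exact gpusvm->ops->invalidate signature lives in the header and the
+ * driver_invalidate_device_tlb() call is a placeholder; the notifier lock is
+ * held in write mode here, hence ctx.in_notifier is set:
+ *
+ *     static void driver_invalidate(struct drm_gpusvm *gpusvm,
+ *                                   struct drm_gpusvm_notifier *notifier,
+ *                                   const struct mmu_notifier_range *mmu_range)
+ *     {
+ *             struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+ *             struct drm_gpusvm_range *range = NULL;
+ *
+ *             driver_invalidate_device_tlb(mmu_range->start, mmu_range->end);
+ *
+ *             drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
+ *                                       mmu_range->end) {
+ *                     drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
+ *
+ *                     if (mmu_range->event == MMU_NOTIFY_UNMAP)
+ *                             drm_gpusvm_range_set_unmapped(range, mmu_range);
+ *             }
+ *     }
+ */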
+
+/**
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be removed
+ *
+ * This function evicts the specified GPU SVM range.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ struct mmu_interval_notifier *notifier = &range->notifier->notifier;
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .notifier = notifier,
+ .start = drm_gpusvm_range_start(range),
+ .end = drm_gpusvm_range_end(range),
+ .dev_private_owner = NULL,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ int err = 0;
+ struct mm_struct *mm = gpusvm->mm;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ hmm_range.hmm_pfns = pfns;
+ while (!time_after(jiffies, timeout)) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (time_after(jiffies, timeout)) {
+ err = -ETIME;
+ break;
+ }
+
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+ if (err != -EBUSY)
+ break;
+ }
+
+ kvfree(pfns);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
+
+/**
+ * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address
+ * @end: End address
+ *
+ * Return: True if GPU SVM has mapping, False otherwise
+ */
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
+ struct drm_gpusvm_range *range = NULL;
+
+ drm_gpusvm_for_each_range(range, notifier, start, end)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_has_mapping);
+
+/**
+ * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
+ * @range: Pointer to the GPU SVM range structure.
+ * @mmu_range: Pointer to the MMU notifier range structure.
+ *
+ * This function marks a GPU SVM range as unmapped and sets the partial_unmap flag
+ * if the range partially falls within the provided MMU notifier range.
+ */
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ lockdep_assert_held_write(&range->gpusvm->notifier_lock);
+
+ range->pages.flags.unmapped = true;
+ if (drm_gpusvm_range_start(range) < mmu_range->start ||
+ drm_gpusvm_range_end(range) > mmu_range->end)
+ range->pages.flags.partial_unmap = true;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
+
+MODULE_DESCRIPTION("DRM GPUSVM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..8a06d296561d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -26,7 +26,9 @@
*/
#include <drm/drm_gpuvm.h>
+#include <drm/drm_print.h>
+#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
@@ -39,7 +41,7 @@
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
- * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
@@ -71,7 +73,7 @@
* but it can also be a 'dummy' object, which can be allocated with
* drm_gpuvm_resv_object_alloc().
*
- * In order to connect a struct drm_gpuva its backing &drm_gem_object each
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
*
@@ -80,7 +82,7 @@
* This is ensured by the API through drm_gpuvm_bo_obtain() and
* drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
* &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
- * particular combination. If not existent a new instance is created and linked
+ * particular combination. If not present, a new instance is created and linked
* to the &drm_gem_object.
*
* &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
@@ -107,7 +109,7 @@
* sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
- * and merging of existent GPU VA mappings with the ones that are requested to
+ * and merging of existing GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
* implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
* as VM BIND.
@@ -118,7 +120,7 @@
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
- * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * Depending on how the new GPU VA mapping intersects with the existing mappings
* of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
* of unmap operations, a maximum of two remap operations and a single map
* operation. The caller might receive no callback at all if no operation is
@@ -138,16 +140,16 @@
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
- * Note that there can't be more than two existent mappings to split up, one at
+ * Note that there can't be more than two existing mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
* call back into the driver in order to unmap a range of GPU VA space. The
- * logic behind this function is way simpler though: For all existent mappings
+ * logic behind this function is way simpler though: For all existing mappings
* enclosed by the given range unmap operations are created. For mappings which
- * are only partically located within the given range, remap operations are
- * created such that those mappings are split up and re-mapped partically.
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
* drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
@@ -167,7 +169,7 @@
* provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
* drm_gpuva_unmap() instead.
*
- * The following diagram depicts the basic relationships of existent GPU VA
+ * The following diagram depicts the basic relationships of existing GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
@@ -217,7 +219,7 @@
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -235,9 +237,9 @@
* and/or non-contiguous BO offset.
*
*
- * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * 5) Requested mapping's range is a left aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one adjusting its BO offset.
+ * the existing one adjusting its BO offset.
*
* ::
*
@@ -270,9 +272,9 @@
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
- * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * 7) Requested mapping's range is a right aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one, without adjusting the BO offset.
+ * the existing one, without adjusting the BO offset.
*
* ::
*
@@ -303,7 +305,7 @@
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
- * existent one, without adjusting the BO offset.
+ * existing one, without adjusting the BO offset.
*
* ::
*
@@ -333,9 +335,9 @@
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
- * 11) Requested mapping's range is a centered subset of the existent one
+ * 11) Requested mapping's range is a centered subset of the existing one
* having a different backing BO. Hence, map the requested mapping and split
- * up the existent one in two mappings, adjusting the BO offset of the right
+ * up the existing one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
@@ -350,7 +352,7 @@
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
- * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * 12) Requested mapping is a contiguous subset of the existing one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
@@ -366,7 +368,7 @@
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -385,7 +387,7 @@
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -405,7 +407,7 @@
*
* 15) Existent mappings is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
- * the existent one, adjusting its BO offset accordingly.
+ * the existing one, adjusting its BO offset accordingly.
*
* ::
*
@@ -420,6 +422,71 @@
*/
/**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
+ * the existing mapping at the start and end boundaries and inserts a new map.
+ *
+ * ::
+ *
+ * a start end b
+ * pre: |-----------------------|
+ * drm_gpuva1
+ *
+ * a start end b
+ * new: |-----|=========|-------|
+ * remap map remap
+ *
+ * one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ * across boundaries, remapping the start and end segments, and inserting two
+ * map operations to cover the full range.
+ *
+ * ::
+ *
+ *     a start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c end d
+ * new: |-------|==========|--------------|========|---------|
+ * remap1 map1 drm_gpuva2 map2 remap2
+ *
+ * two REMAPS and two MAPS
+ *
+ * 3) Either start or end lies within a drm_gpuva. A single remap and a single
+ * map operation are generated to update the affected portion.
+ *
+ *
+ * ::
+ *
+ *     a/start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a/start b c end d
+ * new: |------------------|--------------|========|---------|
+ * drm_gpuva1 drm_gpuva2 map1 remap1
+ *
+ * ::
+ *
+ *     a start b c/end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c/end d
+ * new: |-------|==========|--------------|------------------|
+ * remap1 map1 drm_gpuva2 drm_gpuva3
+ *
+ * one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No operations
+ * are needed as the range is already covered.
+ *
+ * 5) No existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and update.
+ */
+
+/**
* DOC: Locking
*
* In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
@@ -431,8 +498,7 @@
* DRM GPUVM also does not take care of the locking of the backing
* &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
* itself; drivers are responsible to enforce mutual exclusion using either the
- * GEMs dma_resv lock or alternatively a driver specific external lock. For the
- * latter see also drm_gem_gpuva_set_lock().
+ * GEMs dma_resv lock or the GEMs gpuva.lock mutex.
*
* However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
* the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -468,8 +534,8 @@
* make use of them.
*
* The below code is strictly limited to illustrate the generic usage pattern.
- * To maintain simplicitly, it doesn't make use of any abstractions for common
- * code, different (asyncronous) stages with fence signalling critical paths,
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
@@ -478,20 +544,25 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -581,7 +652,7 @@
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -679,7 +750,7 @@
*
* This helper is here to provide lockless list iteration. Lockless as in, the
* iterator releases the lock immediately after picking the first element from
- * the list, so list insertion deletion can happen concurrently.
+ * the list, so list insertion and deletion can happen concurrently.
*
* Elements popped from the original list are kept in a local list, so removal
* and is_empty checks can still happen while we're iterating the list.
@@ -807,6 +878,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
}
/**
+ * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
+ * @vm_bo: the &drm_gpuvm_bo
+ *
+ * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
+ * immediately removed from the evict and extobj lists. Therefore, anyone
+ * iterating these lists should skip entries that are being destroyed.
+ *
+ * Checking the refcount without incrementing it is okay as long as the lock
+ * protecting the evict/extobj list is held for as long as you are using the
+ * vm_bo, because even if the refcount hits zero while you are using it, freeing
+ * the vm_bo requires taking the list's lock.
+ *
+ * Zombie entries can be observed on the evict and extobj lists regardless of
+ * whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a
+ * longer time when the resv lock is used because we can't take the resv lock
+ * during run_job() in immediate mode, meaning that they need to remain on the
+ * lists until drm_gpuvm_bo_deferred_cleanup() is called.
+ */
+static bool
+drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
+{
+ return !kref_read(&vm_bo->kref);
+}
+
+/**
* drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
* @__vm_bo: the &drm_gpuvm_bo
* @__list_name: the name of the list to insert into
@@ -1011,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
INIT_LIST_HEAD(&gpuvm->evict.list);
spin_lock_init(&gpuvm->evict.lock);
+ init_llist_head(&gpuvm->bo_defer);
+
kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
@@ -1052,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
"Extobj list should be empty.\n");
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
"Evict list should be empty.\n");
+ drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
+ "VM BO cleanup list should be empty.\n");
drm_gem_object_put(gpuvm->r_obj);
}
@@ -1147,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
@@ -1159,7 +1262,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
* @gpuvm: the &drm_gpuvm
* @exec: the &drm_exec locking context
* @num_fences: the amount of &dma_fences to reserve
@@ -1229,13 +1332,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
/**
- * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
*
* Acquires all dma-resv locks of all &drm_gem_objects the given
* &drm_gpuvm contains mappings of.
*
- * Addionally, when calling this function with struct drm_gpuvm_exec::extra
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
* being set the driver receives the given @fn callback to lock additional
* dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
* would call drm_exec_prepare_obj() from within this callback.
@@ -1292,7 +1395,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
}
/**
- * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
* @objs: additional &drm_gem_objects to lock
* @num_objs: the number of additional &drm_gem_objects to lock
@@ -1390,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
list.entry.evict) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = ops->vm_bo_validate(vm_bo, exec);
if (ret)
break;
@@ -1490,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+ init_llist_node(&vm_bo->list.entry.bo_defer);
return vm_bo;
}
@@ -1511,7 +1618,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1532,7 +1639,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* If the reference count drops to zero, the &gpuvm_bo is destroyed, which
* includes removing it from the GEMs gpuva list. Hence, if a call to this
* function can potentially let the reference count drop to zero the caller must
- * hold the dma-resv or driver specific GEM gpuva lock.
+ * hold the lock that the GEM uses for its gpuva list (either the GEM's
+ * dma-resv or gpuva.lock mutex).
*
* This function may only be called from non-atomic context.
*
@@ -1550,13 +1658,133 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+/*
+ * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
+ * deferred cleanup
+ *
+ * If deferred cleanup is used, then this must be called right after the vm_bo
+ * refcount drops to zero. Must be called with GEM mutex held. After releasing
+ * the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
+ */
+static void
+drm_gpuvm_bo_into_zombie(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, true);
+ drm_gpuvm_bo_list_del(vm_bo, evict, true);
+ }
+
+ list_del(&vm_bo->list.entry.gem);
+}
+
+/*
+ * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
+ * bo_defer list
+ *
+ * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
+ *
+ * It's important that the GEM stays alive for the duration in which we hold
+ * the mutex, but the instant we add the vm_bo to bo_defer, another thread
+ * might call drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to
+ * avoid kfreeing a mutex we are holding, the GEM mutex must be released
+ * *before* calling this function.
+ */
+static void
+drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
+{
+ llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
+}
+
+static void
+drm_gpuvm_bo_defer_free(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_into_zombie(kref);
+ mutex_unlock(&vm_bo->obj->gpuva.lock);
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+}
+
+/**
+ * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
+ * deferred cleanup
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * This might take and release the GEMs GPUVA lock. You should call
+ * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
+ *
+ * Returns: true if vm_bo is being destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
+{
+ if (!vm_bo)
+ return false;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ return !!kref_put_mutex(&vm_bo->kref,
+ drm_gpuvm_bo_defer_free,
+ &vm_bo->obj->gpuva.lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
+
+/**
+ * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list
+ * @gpuvm: the VM to clean up
+ *
+ * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
+ */
+void
+drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gem_object *obj;
+ struct llist_node *bo_defer;
+
+ bo_defer = llist_del_all(&gpuvm->bo_defer);
+ if (!bo_defer)
+ return;
+
+ if (drm_gpuvm_resv_protected(gpuvm)) {
+ dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
+ llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, false);
+ drm_gpuvm_bo_list_del(vm_bo, evict, false);
+ }
+ dma_resv_unlock(drm_gpuvm_resv(gpuvm));
+ }
+
+ while (bo_defer) {
+ vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
+ bo_defer = bo_defer->next;
+ obj = vm_bo->obj;
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
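+
+/*
+ * Sketch of the intended deferred-cleanup flow in immediate mode (the
+ * run_job()/workqueue placement is up to the driver): paths that must not
+ * take dma-resv or sleep drop their references with the deferred variants,
+ * and a later non-atomic context flushes the bo_defer list:
+ *
+ *     // fence signalling critical path, e.g. run_job()
+ *     drm_gpuva_unlink_defer(va);          // drops the va's vm_bo reference
+ *     drm_gpuvm_bo_put_deferred(vm_bo);    // drops the driver's own reference, if any
+ *
+ *     // later, from a non-atomic context such as a driver workqueue
+ *     drm_gpuvm_bo_deferred_cleanup(gpuvm);
+ */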
+
static struct drm_gpuvm_bo *
__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj)
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
if (vm_bo->vm == gpuvm)
return vm_bo;
@@ -1587,7 +1815,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
/**
- * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
* given &drm_gpuvm and &drm_gem_object
* @gpuvm: The &drm_gpuvm the @obj is mapped in.
* @obj: The &drm_gem_object being mapped in the @gpuvm.
@@ -1615,7 +1843,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
if (!vm_bo)
return ERR_PTR(-ENOMEM);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
return vm_bo;
@@ -1623,7 +1851,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
/**
- * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
* for the given &drm_gpuvm and &drm_gem_object
* @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
*
@@ -1651,7 +1879,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
return vm_bo;
}
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
return __vm_bo;
@@ -1687,7 +1915,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
* @vm_bo: the &drm_gpuvm_bo to add or remove
* @evict: indicates whether the object is evicted
*
- * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
*/
void
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
@@ -1789,7 +2017,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
- * This removes the given &va from the underlaying tree.
+ * This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuvm_for_each_va_safe() and
@@ -1823,8 +2051,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
* reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
@@ -1839,7 +2066,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
va->vm_bo = drm_gpuvm_bo_get(vm_bo);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -1859,8 +2086,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* the latter is dropped.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
@@ -1871,7 +2097,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(va->vm, obj);
list_del_init(&va->gem.entry);
va->vm_bo = NULL;
@@ -1880,6 +2106,40 @@ drm_gpuva_unlink(struct drm_gpuva *va)
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
/**
+ * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
+ * @va: the &drm_gpuva to unlink
+ *
+ * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
+ * the lock for the caller.
+ */
+void
+drm_gpuva_unlink_defer(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
+ bool should_defer_bo;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ mutex_lock(&obj->gpuva.lock);
+ list_del_init(&va->gem.entry);
+
+ /*
+ * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
+ */
+ should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
+ mutex_unlock(&obj->gpuva.lock);
+ if (should_defer_bo)
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+
+ va->vm_bo = NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
+
+/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
* @gpuvm: the &drm_gpuvm to search in
* @addr: the &drm_gpuvas address
@@ -2053,16 +2313,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
+ if (!req)
+ return 0;
+
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2087,10 +2349,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
- struct drm_gpuva *va, bool merge)
+ struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
+ if (madvise)
+ return 0;
+
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2101,10 +2366,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+ const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2119,19 +2389,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ if (madvise && obj)
+ continue;
+
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2152,6 +2425,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2172,6 +2448,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
@@ -2179,6 +2458,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = req_addr,
+ .map.va.range = end - req_addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
@@ -2194,6 +2485,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2202,16 +2496,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
continue;
}
@@ -2230,14 +2526,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, op_map);
}
static int
@@ -2289,7 +2591,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
- ret = op_unmap_cb(ops, priv, va, false);
+ ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2299,13 +2601,10 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
* @priv: pointer to a driver private data structure
+ * @req: pointer to struct drm_gpuvm_map_req
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2332,8 +2631,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2342,14 +2640,12 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
/**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
@@ -2357,7 +2653,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the operations to
- * unmap and, if required, split existent mappings.
+ * unmap and, if required, split existing mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
@@ -2390,6 +2686,126 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+ struct drm_exec *exec = priv;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.unmap->va->gem.obj)
+ return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+ return 0;
+ case DRM_GPUVA_OP_UNMAP:
+ if (op->unmap.va->gem.obj)
+ return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+ .sm_step_map = drm_gpuva_sm_step_lock,
+ .sm_step_remap = drm_gpuva_sm_step_lock,
+ .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: the number of fences to reserve for newly mapped objects
+ * @req: pointer to struct drm_gpuvm_map_req
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is::
+ *
+ * vm_bind {
+ * struct drm_exec exec;
+ *
+ * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
+ * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
+ *
+ * drm_exec_until_all_locked (&exec) {
+ * for_each_vm_bind_operation {
+ * switch (op->op) {
+ * case DRIVER_OP_UNMAP:
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * break;
+ * case DRIVER_OP_MAP:
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
+ * break;
+ * }
+ *
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * return ret;
+ * }
+ * }
+ * }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ * required without the earlier DRIVER_OP_MAP. This is safe because we've
+ * already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ struct drm_gpuvm_map_req *req)
+{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
+ if (req_obj) {
+ int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+
+ if (ret)
+ return ret;
+ }
+
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range)
+{
+ return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
@@ -2481,21 +2897,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuvm *vm;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.vm = gpuvm;
+ args.ops = ops;
+
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(gpuvm, ops);
+ return ERR_PTR(ret);
+}
+
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
- * of existent mapping(s) with the newly requested one.
+ * of existing mapping(s) with the newly requested one.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map, unmap and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
- * already exists is the exact same way.
+ * already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original
@@ -2515,40 +2960,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
- struct drm_gpuva_ops *ops;
- struct {
- struct drm_gpuvm *vm;
- struct drm_gpuva_ops *ops;
- } args;
- int ret;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (unlikely(!ops))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ops->list);
-
- args.vm = gpuvm;
- args.ops = ops;
-
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
- if (ret)
- goto err_free_ops;
-
- return ops;
-
-err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
- return ERR_PTR(ret);
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
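As a minimal sketch of the create/iterate/free pattern described in the kernel-doc above (gpuvm, vm, req and the driver_vm_*() handlers are placeholders for driver-specific code):

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			driver_vm_map(vm, &op->map);
			break;
		case DRM_GPUVA_OP_REMAP:
			driver_vm_remap(vm, &op->remap);
			break;
		case DRM_GPUVA_OP_UNMAP:
			driver_vm_unmap(vm, &op->unmap);
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);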
/**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations to perform splitting
+ * of existing mapping(s) at the start or end, based on the requested mapping.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it
+ * also can be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, a maximum of two remap operations and two
+ * map operations. The two map operations correspond to one mapping from the
+ * requested start to the end of drm_gpuvaX, and another from the start of
+ * drm_gpuvaY to the requested end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req)
+{
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
+
+/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
@@ -2677,8 +3132,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
- * It is the callers responsibility to protect the GEMs GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
@@ -2690,7 +3145,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(vm_bo->obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..f893b1e3a596 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev)
{ }
#endif
+/* drm_client_sysrq.c */
+#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ)
+void drm_client_sysrq_register(struct drm_device *dev);
+void drm_client_sysrq_unregister(struct drm_device *dev);
+#else
+static inline void drm_client_sysrq_register(struct drm_device *dev)
+{ }
+static inline void drm_client_sysrq_unregister(struct drm_device *dev)
+{ }
+#endif
+
/* drm_file.c */
extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);
@@ -85,6 +96,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
@@ -161,6 +174,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -168,6 +183,8 @@ int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
@@ -175,19 +192,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-int drm_gem_pin_locked(struct drm_gem_object *obj);
-void drm_gem_unpin_locked(struct drm_gem_object *obj);
-int drm_gem_pin(struct drm_gem_object *obj);
-void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_dev_fini(struct drm_device *dev);
void drm_debugfs_dev_register(struct drm_device *dev);
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root);
+int drm_debugfs_register(struct drm_minor *minor, int minor_id);
void drm_debugfs_unregister(struct drm_minor *minor);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
@@ -205,8 +217,7 @@ static inline void drm_debugfs_dev_register(struct drm_device *dev)
{
}
-static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f593dc569d31..ff193155129e 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->supports_virtualized_cursor_plane = req->value;
break;
+ case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->plane_color_pipeline = req->value;
+ break;
default:
return -EINVAL;
}
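A hedged userspace sketch of how the new client cap might be enabled through libdrm's drmSetClientCap(); as the check above requires, the file descriptor must already be atomic-aware:

	/* userspace side, assuming libdrm */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		return -1;
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1))
		fprintf(stderr, "plane color pipeline cap not supported\n");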
@@ -653,6 +660,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CHANGE_HANDLE, drm_gem_change_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 79ce86a5bd67..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -310,3 +311,11 @@ void __drmm_mutex_release(struct drm_device *dev, void *res)
mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);
+
+void __drmm_workqueue_release(struct drm_device *device, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+EXPORT_SYMBOL(__drmm_workqueue_release);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 34bca7567576..00482227a9cd 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -25,6 +26,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <video/mipi_display.h>
@@ -218,7 +220,7 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach,
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !drm_gem_is_imported(gem),
fmtcnv_state);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
@@ -229,7 +231,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
case DRM_FORMAT_XRGB8888:
switch (dbidev->pixel_format) {
case DRM_FORMAT_RGB565:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
+ if (swap) {
+ drm_fb_xrgb8888_to_rgb565be(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ }
break;
case DRM_FORMAT_RGB888:
drm_fb_xrgb8888_to_rgb888(&dst_map, NULL, src, fb, clip, fmtcnv_state);
@@ -404,12 +412,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
- size_t len = width * height * 2;
+ const struct drm_format_info *dst_format;
+ size_t len;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
+ dst_format = drm_format_info(dbidev->pixel_format);
+ len = drm_format_info_min_pitch(dst_format, 0, width) * height;
+
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
@@ -680,7 +692,7 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
- size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+ size_t bufsize = (u32)mode->vdisplay * mode->hdisplay * sizeof(u16);
dbidev->drm.mode_config.preferred_depth = 16;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5e5c5f84daac..a712e177b350 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -26,6 +26,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -36,6 +37,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -89,12 +92,13 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.restore = pm_generic_restore,
};
-static const struct bus_type mipi_dsi_bus_type = {
+const struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
.uevent = mipi_dsi_uevent,
.pm = &mipi_dsi_device_pm_ops,
};
+EXPORT_SYMBOL_GPL(mipi_dsi_bus_type);
/**
* of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
@@ -162,13 +166,13 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
u32 reg;
if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
- drm_err(host, "modalias failure on %pOF\n", node);
+ dev_err(host->dev, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
- drm_err(host, "device node %pOF has no valid reg property: %d\n",
+ dev_err(host->dev, "device node %pOF has no valid reg property: %d\n",
node, ret);
return ERR_PTR(-EINVAL);
}
@@ -206,18 +210,18 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
int ret;
if (!info) {
- drm_err(host, "invalid mipi_dsi_device_info pointer\n");
+ dev_err(host->dev, "invalid mipi_dsi_device_info pointer\n");
return ERR_PTR(-EINVAL);
}
if (info->channel > 3) {
- drm_err(host, "invalid virtual channel: %u\n", info->channel);
+ dev_err(host->dev, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
- drm_err(host, "failed to allocate DSI device %ld\n",
+ dev_err(host->dev, "failed to allocate DSI device %ld\n",
PTR_ERR(dsi));
return dsi;
}
@@ -228,7 +232,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
ret = mipi_dsi_device_add(dsi);
if (ret) {
- drm_err(host, "failed to add DSI device %d\n", ret);
+ dev_err(host->dev, "failed to add DSI device %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
@@ -769,41 +773,13 @@ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
EXPORT_SYMBOL(mipi_dsi_generic_write);
/**
- * mipi_dsi_generic_write_chatty() - mipi_dsi_generic_write() w/ an error log
- * @dsi: DSI peripheral device
- * @payload: buffer containing the payload
- * @size: size of payload buffer
- *
- * Like mipi_dsi_generic_write() but includes a dev_err()
- * call for you and returns 0 upon success, not the number of bytes sent.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi,
- const void *payload, size_t size)
-{
- struct device *dev = &dsi->dev;
- ssize_t ret;
-
- ret = mipi_dsi_generic_write(dsi, payload, size);
- if (ret < 0) {
- dev_err(dev, "sending generic data %*ph failed: %zd\n",
- (int)size, payload, ret);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_generic_write_chatty);
-
-/**
- * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write_chatty() w/ accum_err
+ * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write() w/ accum_err
* @ctx: Context for multiple DSI transactions
* @payload: buffer containing the payload
* @size: size of payload buffer
*
- * Like mipi_dsi_generic_write_chatty() but deals with errors in a way that
- * makes it convenient to make several calls in a row.
+ * A wrapper around mipi_dsi_generic_write() that deals with errors in a way
+ * that makes it convenient to make several calls in a row.
*/
void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size)
@@ -825,6 +801,30 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_generic_write_multi);
/**
+ * mipi_dsi_dual_generic_write_multi() - mipi_dsi_generic_write_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @payload: Buffer containing the payload
+ * @size: Size of payload buffer
+ *
+ * A wrapper around mipi_dsi_generic_write_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+ ctx->dsi = dsi2;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_generic_write_multi);
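A short sketch of the intended calling pattern, assuming dsi1/dsi2 and the payload are driver-provided; errors accumulate in ctx.accum_err as with the other *_multi helpers:

	struct mipi_dsi_multi_context ctx = { .dsi = dsi1 };
	static const u8 unlock_seq[] = { 0xb0, 0x04 };   /* hypothetical payload */

	mipi_dsi_dual_generic_write_multi(&ctx, dsi1, dsi2, unlock_seq, sizeof(unlock_seq));
	if (ctx.accum_err)
		return ctx.accum_err;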
+
+/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
@@ -871,6 +871,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based
+ * input pixel format for a given DSI output
+ * pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
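A hedged sketch of using the helper from a DSI host bridge's &drm_bridge_funcs.atomic_get_input_bus_fmts implementation; my_dsi_host, bridge_to_my_dsi_host() and the dsi_format member are placeholders for driver-specific code:

	static u32 *my_dsi_host_get_input_bus_fmts(struct drm_bridge *bridge,
						   struct drm_bridge_state *bridge_state,
						   struct drm_crtc_state *crtc_state,
						   struct drm_connector_state *conn_state,
						   u32 output_fmt,
						   unsigned int *num_input_fmts)
	{
		struct my_dsi_host *host = bridge_to_my_dsi_host(bridge);
		u32 fmt = drm_mipi_dsi_get_input_bus_fmt(host->dsi_format);
		u32 *input_fmts;

		*num_input_fmts = 0;
		if (!fmt)
			return NULL;

		/* the caller frees this array, so it must be allocated */
		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
		if (!input_fmts)
			return NULL;

		input_fmts[0] = fmt;
		*num_input_fmts = 1;
		return input_fmts;
	}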
+
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
@@ -969,6 +1004,30 @@ void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer_multi);
/**
+ * mipi_dsi_dual_dcs_write_buffer_multi - mipi_dsi_dcs_write_buffer_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @data: Buffer containing data to be transmitted
+ * @len: Size of transmission buffer
+ *
+ * A wrapper around mipi_dsi_dcs_write_buffer_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+ ctx->dsi = dsi2;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_dcs_write_buffer_multi);
+
+/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
@@ -1038,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
+ * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
+ *
+ * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it
+ * convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+ ssize_t ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "dcs read with command %#x failed: %d\n", cmd,
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read_multi);
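A minimal usage sketch following the accum_err convention (MIPI_DCS_GET_DISPLAY_ID is just an example command; dsi is driver-provided):

	struct mipi_dsi_multi_context ctx = { .dsi = dsi };
	u8 id = 0;

	mipi_dsi_dcs_read_multi(&ctx, MIPI_DCS_GET_DISPLAY_ID, &id, sizeof(id));
	if (ctx.accum_err)
		return ctx.accum_err;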
+
+/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
@@ -1266,25 +1362,6 @@ int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
/**
- * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
- * output signal on the TE signal line
- * @dsi: DSI peripheral device
- *
- * Return: 0 on success or a negative error code on failure
- */
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
-{
- ssize_t err;
-
- err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
-
-/**
* mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
* output signal on the TE signal line.
* @dsi: DSI peripheral device
@@ -1714,6 +1791,29 @@ void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx)
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi);
/**
+ * mipi_dsi_dcs_set_tear_off_multi() - turn off the display module's Tearing Effect
+ * output signal on the TE signal line
+ * @ctx: Context for multiple DSI transactions
+ */
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ ssize_t err;
+
+ if (ctx->accum_err)
+ return;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+ if (err < 0) {
+ ctx->accum_err = err;
+ dev_err(dev, "Failed to set tear off: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off_multi);
+
+/**
* mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module
* @ctx: Context for multiple DSI transactions
*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ca254611b382..6692abe564d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,6 +49,7 @@
#include <linux/stacktrace.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8642a2fb25a9..d12db9b0bab8 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
@@ -29,6 +30,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
+#include <drm/drm_colorop.h>
#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
@@ -191,11 +193,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ drm_for_each_colorop(colorop, dev)
+ drm_colorop_reset(colorop);
+
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
@@ -383,6 +389,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS_ASYNC", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.async_modifiers_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"SIZE_HINTS", 0);
if (!prop)
return -ENOMEM;
@@ -429,6 +442,7 @@ int drmm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ INIT_LIST_HEAD(&dev->mode_config.colorop_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
idr_init_base(&dev->mode_config.object_idr, 1);
idr_init_base(&dev->mode_config.tile_idr, 1);
@@ -450,6 +464,7 @@ int drmm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
+ dev->mode_config.num_colorop = 0;
if (IS_ENABLED(CONFIG_LOCKDEP)) {
struct drm_modeset_acquire_ctx modeset_ctx;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index e943205a2394..b45d501b10c8 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -28,6 +28,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -386,6 +387,7 @@ EXPORT_SYMBOL(drm_object_property_get_default_value);
/* helper for getconnector and getproperties ioctls */
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props)
@@ -399,6 +401,21 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
continue;
+ if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (prop == plane->color_encoding_property ||
+ prop == plane->color_range_property)
+ continue;
+ }
+
+ if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (prop == plane->color_pipeline_property)
+ continue;
+ }
+
if (*arg_count_props > count) {
ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
@@ -457,6 +474,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+ file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(arg->props_ptr),
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 513c04f74420..e72f855fc495 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1286,14 +1286,11 @@ EXPORT_SYMBOL(drm_mode_set_name);
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
- unsigned int num, den;
+ unsigned int num = 1, den = 1;
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock;
- den = mode->htotal * mode->vtotal;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1301,6 +1298,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
+ if (check_mul_overflow(mode->clock, num, &num))
+ return 0;
+
+ if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den))
+ return 0;
+
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
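For reference, a worked example of the computation under the overflow-checked form above, using the standard 1920x1080@60 CEA timings as an illustration:

	/*
	 * clock = 148500 kHz, htotal = 2200, vtotal = 1125, no interlace/dblscan/vscan:
	 *
	 *   num = 148500 * 1 = 148500
	 *   den = 2200 * 1125 * 1 = 2475000
	 *   vrefresh = DIV_ROUND_CLOSEST_ULL(148500 * 1000, 2475000) = 60 Hz
	 *
	 * Modes whose products would overflow an unsigned int now return 0
	 * instead of a wrapped value.
	 */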
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 5565464c1734..a57f6a10ada4 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,8 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
@@ -72,6 +74,7 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @dev: DRM device
* @fb: drm_framebuffer object to fill out
+ * @info: pixel format information
* @mode_cmd: metadata from the userspace fb creation request
*
* This helper can be used in a drivers fb_create callback to pre-fill the fb's
@@ -79,12 +82,13 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
*/
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
fb->dev = dev;
- fb->format = drm_get_format_info(dev, mode_cmd);
+ fb->format = info;
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
@@ -199,10 +203,10 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it was never initialized
@@ -248,7 +252,7 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it is not initialized
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7694b85e75e3..beb91a13a312 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,6 +21,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 5530919e0ba0..4f65ce729a47 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -55,7 +55,8 @@ EXPORT_SYMBOL(drm_of_crtc_port_mask);
* and generate the DRM mask of CRTCs which may be attached to this
* encoder.
*
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*/
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -106,7 +107,9 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
* satisfy their .bind requirements
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ *
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
@@ -268,9 +271,9 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
*panel = NULL;
}
- /* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
+ /* No panel found yet, check for a bridge next. */
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
new file mode 100644
index 000000000000..37d7cfbbb3e8
--- /dev/null
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_pagemap.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
+ * providing a way to populate a struct mm_struct virtual range with device
+ * private pages and to provide helpers to abstract device memory allocations,
+ * to migrate memory back and forth between device memory and system RAM and
+ * to handle access (and in the future migration) between devices implementing
+ * a fast interconnect that is not necessarily visible to the rest of the
+ * system.
+ *
+ * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
+ * instances to populate struct mm_struct virtual ranges with memory, and the
+ * migration is best effort only and may thus fail. The implementation should
+ * also handle device unbinding by blocking (return an -ENODEV) error for new
+ * population requests and after that migrate all device pages to system ram.
+ */
+
+/**
+ * DOC: Migration
+ *
+ * Migration granularity typically follows the GPU SVM range requests. If
+ * there are clashes, due to races or because multiple GPU SVM instances have
+ * different views of the ranges used, parts of a requested range may already
+ * be present in the requested device memory. In that case the implementation
+ * has a variety of options: it can fail, it can populate only the part of the
+ * range that isn't already in device memory, or it can evict the range to
+ * system memory before trying to migrate. Ideally an implementation would
+ * just try to migrate the missing part of the range and allocate just enough
+ * memory to do so.
+ *
+ * When migrating to system memory as a response to a cpu fault or a device
+ * memory eviction request, currently a full device memory allocation is
+ * migrated back to system memory. Moving forward this might need improvement for
+ * situations where a single page needs bouncing between system memory and
+ * device memory due to, for example, atomic operations.
+ *
+ * Key DRM pagemap components:
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for the drm_pagemap to
+ * migrate to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations:
+ * releasing memory, populating PFNs, and copying to / from device memory.
+ */
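A hedged sketch of the driver-side ops table; the callback names are the ones used by this file, while the my_*() implementations are placeholders:

	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
		.devmem_release      = my_devmem_release,      /* free device memory */
		.populate_devmem_pfn = my_populate_devmem_pfn, /* hand out device PFNs */
		.copy_to_devmem      = my_copy_to_devmem,      /* RAM -> device copy */
		.copy_to_ram         = my_copy_to_ram,         /* device -> RAM copy */
	};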
+
+/**
+ * struct drm_pagemap_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_pagemap_zdd {
+ struct kref refcount;
+ struct drm_pagemap_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up the
+ * reference count and the device private page owner.
+ *
+ * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
+ */
+static struct drm_pagemap_zdd *
+drm_pagemap_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_pagemap_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function releases the device memory allocation, if any, and frees the
+ * zdd structure.
+ */
+static void drm_pagemap_zdd_destroy(struct kref *ref)
+{
+ struct drm_pagemap_zdd *zdd =
+ container_of(ref, struct drm_pagemap_zdd, refcount);
+ struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_pagemap_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and destroys it if the count drops to zero.
+ */
+static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_pagemap_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_pagemap_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_pagemap_get_devmem_page(struct page *page,
+ struct drm_pagemap_zdd *zdd)
+{
+ page->zone_device_data = drm_pagemap_zdd_get(zdd);
+ zone_device_page_init(page, 0);
+}
+
+/**
+ * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA information in the provided
+ * @pagemap_addr array.
+ *
+ * Returns: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_pagemap_migrate_map_pages(struct device *dev,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages;) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+ dma_addr_t dma_addr;
+ struct folio *folio;
+ unsigned int order = 0;
+
+ if (!page)
+ goto next;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ folio = page_folio(page);
+ order = folio_order(folio);
+
+ dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+ if (dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(dma_addr,
+ DRM_INTERCONNECT_SYSTEM,
+ order, dir);
+
+next:
+ i += NR_PAGES(order);
+ }
+
+ return 0;
+}
+
+/**
+ * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @pagemap_addr: Array of DMA information corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each entry provided in @pagemap_addr, checks
+ * that it is valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_pagemap_migrate_unmap_pages(struct device *dev,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages;) {
+ if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+ goto next;
+
+ dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+ i += NR_PAGES(pagemap_addr[i].order);
+ }
+}
+
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
+ * @devmem_allocation: The device memory allocation to migrate to.
+ * The caller should hold a reference to the device memory allocation,
+ * and the reference is consumed by this function unless it returns with
+ * an error.
+ * @mm: Pointer to the struct mm_struct.
+ * @start: Start of the virtual address range to migrate.
+ * @end: End of the virtual address range to migrate.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @pgmap_owner: Not used currently, since only system memory is considered.
+ *
+ * This function migrates the specified virtual address range to device memory.
+ * It performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Expected to be called while holding the mmap lock in
+ * at least read mode.
+ *
+ * Note: The @timeslice_ms parameter can typically be used to force data to
+ * remain in pagemap pages long enough for a GPU to perform a task and to prevent
+ * a migration livelock. One alternative would be for the GPU driver to block
+ * in a mmu_notifier for the specified amount of time, but adding the
+ * functionality to the pagemap is likely nicer to the system as a whole.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = pgmap_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_pagemap_zdd *zdd = NULL;
+ struct page **pages;
+ struct drm_pagemap_addr *pagemap_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(mm);
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
+
+ zdd = drm_pagemap_zdd_alloc(pgmap_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_pagemap_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(timeslice_ms);
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_pagemap_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
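A minimal calling sketch, matching the documented requirement to hold the mmap lock in at least read mode (devmem, start, end, timeslice_ms and pgmap_owner are driver-provided):

	mmap_read_lock(mm);
	err = drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
					    timeslice_ms, pgmap_owner);
	mmap_read_unlock(mm);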
+
+/**
+ * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Number of pages to migrate
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, vma_alloc_folio() is used for allocation;
+ * otherwise folio_alloc() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages;) {
+ struct page *page = NULL, *src_page;
+ struct folio *folio;
+ unsigned int order = 0;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ goto next;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ goto next;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ goto next;
+ }
+
+ order = folio_order(page_folio(src_page));
+
+ /* TODO: Support fallback to single pages if THP allocation fails */
+ if (vas)
+ folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
+ else
+ folio = folio_alloc(GFP_HIGHUSER, order);
+
+ if (!folio)
+ goto free_pages;
+
+ page = folio_page(folio, 0);
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+ if (page)
+ addr += page_size(page);
+ else
+ addr += PAGE_SIZE;
+
+ i += NR_PAGES(order);
+ }
+
+ for (i = 0; i < npages;) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
+
+ if (!page)
+ goto next_lock;
+
+ WARN_ON_ONCE(!folio_trylock(page_folio(page)));
+
+ order = folio_order(page_folio(page));
+ *mpages += NR_PAGES(order);
+
+next_lock:
+ i += NR_PAGES(order);
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages;) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
+
+ if (!page)
+ goto next_put;
+
+ put_page(page);
+ mpfn[i] = 0;
+
+ order = folio_order(page_folio(page));
+
+next_put:
+ i += NR_PAGES(order);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_pagemap_migrate_to_ram but does not require the mmap lock;
+ * migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ struct drm_pagemap_addr *pagemap_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ pagemap_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
+
+/**
+ * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified GPU SVM range
+ * to RAM. It sets up the migration, populates and DMA-maps RAM PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_pagemap_zdd *zdd;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ struct drm_pagemap_addr *pagemap_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
+ * @folio: Pointer to the folio
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a folio when it is being released.
+ */
+static void drm_pagemap_folio_free(struct folio *folio)
+{
+ drm_pagemap_zdd_put(folio->page.zone_device_data);
+}
+
+/**
+ * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a virtual range
+ * to RAM. The device memory allocation in which the device page is found is
+ * migrated in its entirety.
+ *
+ * Returns:
+ * VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_pagemap_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
+ .folio_free = drm_pagemap_folio_free,
+ .migrate_to_ram = drm_pagemap_migrate_to_ram,
+};
+
+/**
+ * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Returns:
+ * Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
+{
+ return &drm_pagemap_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
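A driver wiring up device-private memory would plug these operations into its struct dev_pagemap before registering it. A hedged sketch; struct my_device and its pgmap member are assumptions, only the dev_pagemap fields and memremap_pages() are standard kernel API:

/* Hypothetical sketch: hook the shared pagemap ops into a driver's dev_pagemap. */
static int my_pagemap_setup(struct my_device *mdev)
{
	struct dev_pagemap *pgmap = &mdev->pgmap;	/* driver-owned */
	void *addr;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->ops = drm_pagemap_pagemap_ops_get();
	pgmap->owner = mdev;	/* later matched as device_private_page_owner */

	addr = memremap_pages(pgmap, dev_to_node(mdev->dev));
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	return 0;
}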
+
+/**
+ * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
+ *
+ * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
+ */
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
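A typical caller initializes the structure right after carving out a chunk of device memory and before handing it to the migration helpers. A minimal sketch; the my_* names are illustrative, and the ops table contents (beyond the populate_devmem_pfn and copy_to_ram hooks used above) are left to the driver:

/* Hypothetical sketch: set up a freshly allocated device-memory chunk. */
static const struct drm_pagemap_devmem_ops my_devmem_ops = {
	/* .populate_devmem_pfn, .copy_to_ram, ... supplied by the driver */
};

static void my_devmem_setup(struct my_vram_chunk *chunk, struct my_device *mdev,
			    struct mm_struct *mm, size_t size)
{
	drm_pagemap_devmem_init(&chunk->devmem, mdev->dev, mm,
				&my_devmem_ops, mdev->dpagemap, size);
}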
+
+/**
+ * drm_pagemap_page_to_dpagemap() - Return a pointer the drm_pagemap of a page
+ * @page: The struct page.
+ *
+ * Return: A pointer to the struct drm_pagemap of a device private page that
+ * was populated from the struct drm_pagemap. If the page was *not* populated
+ * from a struct drm_pagemap, the result is undefined and the function call
+ * may result in dereferencing an invalid address.
+ */
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+ struct drm_pagemap_zdd *zdd = page->zone_device_data;
+
+ return zdd->devmem_allocation->dpagemap;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
+
+/**
+ * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
+ * @dpagemap: Pointer to the drm_pagemap managing the device memory
+ * @start: Start of the virtual range to populate.
+ * @end: End of the virtual range to populate.
+ * @mm: Pointer to the virtual address space.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ *
+ * Attempt to populate a virtual range with device memory pages,
+ * clearing them or migrating data from the existing pages if necessary.
+ * The function is best effort only, and implementations may vary
+ * in how hard they try to satisfy the request.
+ *
+ * Return: %0 on success, negative error code on error. If the hardware
+ * device was removed / unbound the function will return %-ENODEV.
+ */
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
+{
+ int err;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+ err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
+ timeslice_ms);
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_pagemap_populate_mm);
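From the GPU fault side, a driver would typically request device pages for the faulting range along these lines. A hedged sketch; my_gpu_fault(), the mdev->dpagemap pointer and the way the range and timeslice are chosen are assumptions for the example:

/* Hypothetical sketch: migrate a faulting CPU range into device memory. */
static int my_gpu_fault(struct my_device *mdev, struct mm_struct *mm,
			unsigned long addr, unsigned long size)
{
	unsigned long start = ALIGN_DOWN(addr, size);
	unsigned long end = start + size;

	/*
	 * Ask the pagemap to back [start, end) with device pages and keep
	 * them resident for at least 5 ms before they may migrate back.
	 */
	return drm_pagemap_populate_mm(mdev->dpagemap, start, end, mm, 5);
}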
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 9940e96d35e3..d1e6598ea3bc 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -23,6 +23,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,7 +51,7 @@ static LIST_HEAD(panel_list);
* @dev: parent device of the panel
* @funcs: panel operations
* @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
- * the panel interface
+ * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown)
*
* Initialize the panel structure for subsequent registration with
* drm_panel_add().
@@ -58,6 +59,9 @@ static LIST_HEAD(panel_list);
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type)
{
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev));
+
INIT_LIST_HEAD(&panel->list);
INIT_LIST_HEAD(&panel->followers);
mutex_init(&panel->follower_lock);
@@ -71,8 +75,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -102,21 +107,21 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
+ * integrated circuitry via a command bus. This function cannot fail (as it is
+ * called from the pre_enable call chain). There will always be a call to
+ * drm_panel_unprepare() afterwards.
*/
-int drm_panel_prepare(struct drm_panel *panel)
+void drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -129,17 +134,17 @@ int drm_panel_prepare(struct drm_panel *panel)
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_prepared)
+ continue;
+
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -151,16 +156,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_unprepare(struct drm_panel *panel)
+void drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -173,12 +176,15 @@ int drm_panel_unprepare(struct drm_panel *panel)
*/
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_unpreparing)
+ continue;
+
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -192,11 +198,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
}
panel->prepared = false;
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -206,26 +209,29 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
+ * this call completes. This function cannot fail (as it is called from the
+ * enable call chain). There will always be a call to drm_panel_disable()
+ * afterwards.
*/
-int drm_panel_enable(struct drm_panel *panel)
+void drm_panel_enable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
- return 0;
+ return;
}
+ mutex_lock(&panel->follower_lock);
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return ret;
+ goto exit;
}
panel->enabled = true;
@@ -234,7 +240,18 @@ int drm_panel_enable(struct drm_panel *panel)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
- return 0;
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_enabled)
+ continue;
+
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -245,15 +262,14 @@ EXPORT_SYMBOL(drm_panel_enable);
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_disable(struct drm_panel *panel)
+void drm_panel_disable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -266,7 +282,19 @@ int drm_panel_disable(struct drm_panel *panel)
*/
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
- return 0;
+ return;
+ }
+
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_disabling)
+ continue;
+
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
}
ret = backlight_disable(panel->backlight);
@@ -277,11 +305,12 @@ int drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return ret;
+ goto exit;
}
panel->enabled = false;
- return 0;
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -314,6 +343,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to @struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
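Panel drivers are expected to go through the devm_drm_panel_alloc() wrapper mentioned above rather than call this export directly; conceptually the allocation boils down to the following sketch. struct my_panel and the connector type are made up for the example, and the caller must still IS_ERR()-check the result:

/* Hypothetical sketch: embed a struct drm_panel in a driver structure. */
struct my_panel {
	struct drm_panel base;
	/* driver private fields ... */
};

static struct my_panel *my_panel_create(struct device *dev,
					const struct drm_panel_funcs *funcs)
{
	return __devm_drm_panel_alloc(dev, sizeof(struct my_panel),
				      offsetof(struct my_panel, base),
				      funcs, DRM_MODE_CONNECTOR_DSI);
}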
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
@@ -396,13 +512,51 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/* Find panel by fwnode. This should be identical to of_drm_find_panel(). */
+static struct drm_panel *find_panel_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct drm_panel *panel;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (dev_fwnode(panel->dev) == fwnode) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/* Find panel by follower device */
+static struct drm_panel *find_panel_by_dev(struct device *follower_dev)
+{
+ struct fwnode_handle *fwnode;
+ struct drm_panel *panel;
+
+ fwnode = fwnode_find_reference(dev_fwnode(follower_dev), "panel", 0);
+ if (IS_ERR(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ panel = find_panel_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return panel;
+}
+
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
- * At the moment panels can only be followed on device tree enabled systems.
+ *
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
@@ -414,7 +568,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_present(dev->of_node, "panel");
+ return device_property_present(dev, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
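To make the power sequencing concrete, a follower driver (for instance a touchscreen) registers a small set of callbacks; with this patch each callback is optional. The my_ts names below are purely illustrative:

/* Hypothetical sketch: a touchscreen power-sequenced as a panel follower. */
struct my_ts {
	struct drm_panel_follower follower;
	/* driver private fields ... */
};

static int my_ts_panel_prepared(struct drm_panel_follower *follower)
{
	/* power up the controller now that the panel has power */
	return 0;
}

static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
{
	/* power down before the panel loses power */
	return 0;
}

static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
	.panel_prepared = my_ts_panel_prepared,
	.panel_unpreparing = my_ts_panel_unpreparing,
};

static int my_ts_probe(struct device *dev, struct my_ts *ts)
{
	ts->follower.funcs = &my_ts_follower_funcs;

	if (!drm_is_panel_follower(dev))
		return 0;

	return drm_panel_add_follower(dev, &ts->follower);
}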
@@ -423,15 +577,14 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
- * A panel follower is called right after preparing the panel and right before
- * unpreparing the panel. It's primary intention is to power on an associated
- * touchscreen, though it could be used for any similar devices. Multiple
- * devices are allowed the follow the same panel.
+ * A panel follower is called right after preparing/enabling the panel and right
+ * before unpreparing/disabling the panel. Its primary intention is to power on
+ * an associated touchscreen, though it could be used for any similar devices.
+ * Multiple devices are allowed to follow the same panel.
*
- * If a follower is added to a panel that's already been turned on, the
- * follower's prepare callback is called right away.
+ * If a follower is added to a panel that's already been prepared/enabled, the
+ * follower's prepared/enabled callback is called right away.
*
- * At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
@@ -441,16 +594,10 @@ EXPORT_SYMBOL(drm_is_panel_follower);
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
- struct device_node *panel_np;
struct drm_panel *panel;
int ret;
- panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
- if (!panel_np)
- return -ENODEV;
-
- panel = of_drm_find_panel(panel_np);
- of_node_put(panel_np);
+ panel = find_panel_by_dev(follower_dev);
if (IS_ERR(panel))
return PTR_ERR(panel);
@@ -460,12 +607,18 @@ int drm_panel_add_follower(struct device *follower_dev,
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
- if (panel->prepared) {
+ if (panel->prepared && follower->funcs->panel_prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
+ if (panel->enabled && follower->funcs->panel_enabled) {
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
mutex_unlock(&panel->follower_lock);
@@ -478,7 +631,8 @@ EXPORT_SYMBOL(drm_panel_add_follower);
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
- * unprepare function if we're removed from a panel that's currently prepared.
+ * unpreparing/disabling function if we're removed from a panel that's currently
+ * prepared/enabled.
*
* Return: 0 or an error code.
*/
@@ -489,7 +643,13 @@ void drm_panel_remove_follower(struct drm_panel_follower *follower)
mutex_lock(&panel->follower_lock);
- if (panel->prepared) {
+ if (panel->enabled && follower->funcs->panel_disabling) {
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+ if (panel->prepared && follower->funcs->panel_unpreparing) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index c477d98ade2b..537dc6dd0534 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -2,28 +2,32 @@
#include <linux/array_size.h>
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <drm/drm_edid.h>
#include <drm/drm_utils.h>
-struct drm_panel_min_backlight_quirk {
- struct {
- enum dmi_field field;
- const char * const value;
- } dmi_match;
+struct drm_panel_match {
+ enum dmi_field field;
+ const char * const value;
+};
+
+struct drm_get_panel_backlight_quirk {
+ struct drm_panel_match dmi_match;
+ struct drm_panel_match dmi_match_other;
struct drm_edid_ident ident;
- u8 min_brightness;
+ struct drm_panel_backlight_quirk quirk;
};
-static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = {
/* 13 inch matte panel */
{
.dmi_match.field = DMI_BOARD_VENDOR,
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch glossy panel */
{
@@ -31,7 +35,7 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch 2.8k panel */
{
@@ -39,56 +43,114 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
.ident.name = "NE135A1M-NY1",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Steam Deck models */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Jupiter",
+ .quirk = { .min_brightness = 1, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Galileo",
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Have OLED Panels with brightness issue when last byte is 0/1 */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "AYANEO",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "AYANEO 3",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G0A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G1A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1Pro",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1 EVA-02",
+ .quirk = { .brightness_mask = 3, },
},
};
-static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
- const struct drm_edid *edid)
+static bool drm_panel_min_backlight_quirk_matches(
+ const struct drm_get_panel_backlight_quirk *quirk,
+ const struct drm_edid *edid)
{
- if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ if (quirk->dmi_match.field &&
+ !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (quirk->dmi_match_other.field &&
+ !dmi_match(quirk->dmi_match_other.field,
+ quirk->dmi_match_other.value))
return false;
- if (!drm_edid_match(edid, &quirk->ident))
+ if (quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident))
return false;
return true;
}
/**
- * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * drm_get_panel_backlight_quirk - Get backlight quirks for a panel
* @edid: EDID of the panel to check
*
* This function checks for platform specific (e.g. DMI based) quirks
* providing info on the minimum backlight brightness for systems where this
- * cannot be probed correctly from the hard-/firm-ware.
+ * cannot be probed correctly from the hard-/firm-ware and other sources.
*
* Returns:
- * A negative error value or
- * an override value in the range [0, 255] representing 0-100% to be scaled to
- * the drivers target range.
+ * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an
+ * error pointer.
*/
-int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid)
{
- const struct drm_panel_min_backlight_quirk *quirk;
+ const struct drm_get_panel_backlight_quirk *quirk;
size_t i;
if (!IS_ENABLED(CONFIG_DMI))
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
if (!edid)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
quirk = &drm_panel_min_backlight_quirks[i];
if (drm_panel_min_backlight_quirk_matches(quirk, edid))
- return quirk->min_brightness;
+ return &quirk->quirk;
}
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
-EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+EXPORT_SYMBOL(drm_get_panel_backlight_quirk);
MODULE_DESCRIPTION("Quirks for panel backlight overrides");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 4a73821b81f6..3a218fb592ce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
@@ -93,6 +94,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = {
+ .width = 640,
+ .height = 960,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@@ -123,6 +130,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -184,10 +197,10 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* AYA NEO AYANEO 2 */
+ }, { /* AYA NEO AYANEO 2/2S */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
@@ -202,6 +215,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Flip DS Bottom Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"),
+ },
+ .driver_data = (void *)&lcd640x960_leftside_up,
+ }, { /* AYA NEO Flip KB/DS Top Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO Founder */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
@@ -226,6 +251,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
+ }, { /* AYA NEO SLIDE */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYN Loki Max */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
@@ -315,6 +346,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 2 (correct DMI strings) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win 3 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
@@ -443,6 +480,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* OneXPlayer Mini (Intel) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1200x1920_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
@@ -475,6 +518,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index f128d345b16d..d4b6ea42db0f 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -6,7 +6,9 @@
* Tux Ascii art taken from cowsay written by Tony Monroe
*/
+#include <linux/export.h>
#include <linux/font.h>
+#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/kdebug.h>
@@ -154,6 +156,124 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
+static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
+{
+ u16 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
+{
+ u8 *p = vaddr + offset;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p = color & 0xff;
+}
+
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+{
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+}
+
+static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
+{
+ u32 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
+{
+ switch (cpp) {
+ case 2:
+ drm_panic_write_pixel16(vaddr, offset, color);
+ break;
+ case 3:
+ drm_panic_write_pixel24(vaddr, offset, color);
+ break;
+ case 4:
+ drm_panic_write_pixel32(vaddr, offset, color);
+ break;
+ default:
+ pr_debug_once("Can't blit with pixel width %d\n", cpp);
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, const u8 *sbuf8,
+ unsigned int spitch, struct drm_rect *clip,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ unsigned int height = drm_rect_height(clip);
+ unsigned int width = drm_rect_width(clip);
+ void *vaddr = NULL;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (!pages[new_page])
+ continue;
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
+ }
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_blit - convert a monochrome image to a linear framebuffer
* @sb: destination scanout buffer
@@ -177,6 +297,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
+ if (sb->pages)
+ return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ sbuf8, spitch, clip, scale, fg_color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -209,6 +333,43 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
}
+static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, struct drm_rect *clip,
+ u32 color)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ void *vaddr = NULL;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x++) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = y * dpitch + x * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_fill - Fill a rectangle with a color
* @sb: destination scanout buffer
@@ -225,6 +386,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_fill_pixel(sb, clip, color);
+ if (sb->pages)
+ return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ clip, color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -306,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
+
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -354,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -397,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -486,11 +654,6 @@ static void drm_panic_qr_exit(void)
stream.workspace = NULL;
}
-extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
-
-extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
- u8 *tmp, size_t tmp_size);
-
static int drm_panic_get_qr_code_url(u8 **qr_image)
{
struct kmsg_dump_iter iter;
@@ -499,7 +662,7 @@ static int drm_panic_get_qr_code_url(u8 **qr_image)
char *kmsg;
int max_qr_data_size, url_len;
- url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=",
+ url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&z=",
utsname()->machine, utsname()->release);
max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len);
@@ -615,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -628,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
@@ -714,16 +880,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (ret || !drm_panic_is_format_supported(sb.format))
+ goto unlock;
+
+ /* One of these should be set, or it can't draw pixels */
+ if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
+ goto unlock;
+
drm_panic_set_description(description);
- ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+ draw_panic_dispatch(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
- if (!ret && drm_panic_is_format_supported(sb.format)) {
- draw_panic_dispatch(&sb);
- if (plane->helper_private->panic_flush)
- plane->helper_private->panic_flush(plane);
- }
drm_panic_clear_description();
+
+unlock:
drm_panic_unlock(plane->dev, flags);
}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index ef2d490965ba..ac27e86c601c 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
//! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -13,12 +13,13 @@
//! The binary data must be a valid URL parameter, so the easiest way is
//! to use base64 encoding. But this wastes 25% of data space, so the
//! whole stack trace won't fit in the QR code. So instead it encodes
-//! every 13bits of input into 4 decimal digits, and then uses the
+//! every 7 bytes of input into 17 decimal digits, and then uses the
//! efficient numeric encoding, that encode 3 decimal digits into
-//! 10bits. This makes 39bits of compressed data into 12 decimal digits,
-//! into 40bits in the QR code, so wasting only 2.5%. And the numbers are
+//! 10 bits. This turns 168 bits of compressed data into 51 decimal digits,
+//! which take 170 bits in the QR code, wasting only 1.17%. And the numbers are
//! valid URL parameter, so the website can do the reverse, to get the
-//! binary data.
+//! binary data. This is the same algorithm used by the FIDO v2.2 QR-initiated
+//! authentication specification.
//!
//! Inspired by these 3 projects, all under MIT license:
//!
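As a quick sanity check of those numbers (purely illustrative, not part of the patch): a 7-byte chunk holds values up to 2^56 - 1, which always fits in 17 decimal digits, and numeric mode spends 10 bits per group of 3 digits, so 21 bytes (168 bits) of input become 51 digits, i.e. 170 bits.

/* Illustrative only: count the decimal digits needed for a 7-byte chunk. */
static unsigned int digits_for_7_bytes(void)
{
	unsigned long long chunk = 0xFFFFFFFFFFFFFFULL;	/* 2^56 - 1, worst case */
	unsigned int digits = 0;

	while (chunk) {
		chunk /= 10;
		digits++;
	}

	return digits;	/* 17; three such chunks give 51 digits = 170 bits */
}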
@@ -26,8 +27,7 @@
//! * <https://github.com/erwanvivien/fast_qr>
//! * <https://github.com/bjguillot/qr>
-use core::cmp;
-use kernel::str::CStr;
+use kernel::prelude::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
struct Version(usize);
@@ -296,35 +296,11 @@ const MODE_BINARY: u16 = 4;
/// Padding bytes.
const PADDING: [u8; 2] = [236, 17];
-/// Get the next 13 bits of data, starting at specified offset (in bits).
-fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> {
- if offset < data.len() * 8 {
- let size = cmp::min(13, data.len() * 8 - offset);
- let byte_off = offset / 8;
- let bit_off = offset % 8;
- // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13).
- let b = (bit_off + size) as u16;
-
- let first_byte = (data[byte_off] << bit_off >> bit_off) as u16;
-
- let number = match b {
- 0..=8 => first_byte >> (8 - b),
- 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16,
- _ => {
- (first_byte << (b - 8))
- + ((data[byte_off + 1] as u16) << (b - 16))
- + (data[byte_off + 2] >> (24 - b)) as u16
- }
- };
- Some((number, size))
- } else {
- None
- }
-}
-
/// Number of bits to encode characters in numeric mode.
const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10];
-const POW10: [u16; 4] = [1, 10, 100, 1000];
+/// Number of decimal digits required to encode n bytes of binary data.
+/// eg: you need 15 decimal digits to fit 6 bytes of binary data.
+const BYTES_TO_DIGITS: [usize; 8] = [0, 3, 5, 8, 10, 13, 15, 17];
enum Segment<'a> {
Numeric(&'a [u8]),
@@ -339,7 +315,7 @@ impl Segment<'_> {
}
}
- // Returns the size of the length field in bits, depending on QR Version.
+ /// Returns the size of the length field in bits, depending on QR Version.
fn length_bits_count(&self, version: Version) -> usize {
let Version(v) = version;
match self {
@@ -355,18 +331,14 @@ impl Segment<'_> {
}
}
- // Number of characters in the segment.
+ /// Number of characters in the segment.
fn character_count(&self) -> usize {
match self {
Segment::Binary(data) => data.len(),
Segment::Numeric(data) => {
- let data_bits = data.len() * 8;
- let last_chars = match data_bits % 13 {
- 1 => 1,
- k => (k + 1) / 3,
- };
- // 4 decimal numbers per 13bits + remainder.
- 4 * (data_bits / 13) + last_chars
+ let last_chars = BYTES_TO_DIGITS[data.len() % 7];
+ // 17 decimal digits per 7 bytes + remainder.
+ 17 * (data.len() / 7) + last_chars
}
}
}
@@ -394,8 +366,68 @@ impl Segment<'_> {
SegmentIterator {
segment: self,
offset: 0,
- carry: 0,
- carry_len: 0,
+ decfifo: Default::default(),
+ }
+ }
+}
+
+/// Max fifo size is 17 (max push) + 2 (max remaining)
+const MAX_FIFO_SIZE: usize = 19;
+
+/// A simple Decimal digit FIFO
+#[derive(Default)]
+struct DecFifo {
+ decimals: [u8; MAX_FIFO_SIZE],
+ len: usize,
+}
+
+// On arm32 architecture, dividing an `u64` by a constant will generate a call
+// to `__aeabi_uldivmod` which is not present in the kernel.
+// So use the multiply by inverse method for this architecture.
+fn div10(val: u64) -> u64 {
+ if cfg!(target_arch = "arm") {
+ let val_h = val >> 32;
+ let val_l = val & 0xFFFFFFFF;
+ let b_h: u64 = 0x66666666;
+ let b_l: u64 = 0x66666667;
+
+ let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
+ let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
+ let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
+
+ tmp3 >> 2
+ } else {
+ val / 10
+ }
+}
+
+impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+ for i in (0..self.len).rev() {
+ self.decimals[i + len] = self.decimals[i];
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+ chunk = div10(chunk);
+ }
+ self.len += len;
+ }
+
+ /// Pop 3 decimal digits from the FIFO
+ fn pop3(&mut self) -> Option<(u16, usize)> {
+ if self.len == 0 {
+ None
+ } else {
+ let poplen = 3.min(self.len);
+ self.len -= poplen;
+ let mut out = 0;
+ let mut exp = 1;
+ for i in 0..poplen {
+ out += u16::from(self.decimals[self.len + i]) * exp;
+ exp *= 10;
+ }
+ Some((out, NUM_CHARS_BITS[poplen]))
}
}
}
@@ -403,8 +435,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u16,
- carry_len: usize,
+ decfifo: DecFifo,
}
impl Iterator for SegmentIterator<'_> {
@@ -414,7 +445,7 @@ impl Iterator for SegmentIterator<'_> {
match self.segment {
Segment::Binary(data) => {
if self.offset < data.len() {
- let byte = data[self.offset] as u16;
+ let byte = u16::from(data[self.offset]);
self.offset += 1;
Some((byte, 8))
} else {
@@ -422,41 +453,17 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len == 3 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- self.carry = 0;
- Some(out)
- } else if let Some((bits, size)) = get_next_13b(data, self.offset) {
- self.offset += size;
- let new_chars = match size {
- 1 => 1,
- k => (k + 1) / 3,
- };
- if self.carry_len + new_chars > 3 {
- self.carry_len = new_chars + self.carry_len - 3;
- let out = (
- self.carry * POW10[new_chars - self.carry_len]
- + bits / POW10[self.carry_len],
- NUM_CHARS_BITS[3],
- );
- self.carry = bits % POW10[self.carry_len];
- Some(out)
- } else {
- let out = (
- self.carry * POW10[new_chars] + bits,
- NUM_CHARS_BITS[self.carry_len + new_chars],
- );
- self.carry_len = 0;
- Some(out)
- }
- } else if self.carry_len > 0 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- Some(out)
- } else {
- None
+ if self.decfifo.len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the fifo,
+ // take the next 7 bytes of input, and push them to the fifo.
+ let mut buf = [0u8; 8];
+ let len = 7.min(data.len() - self.offset);
+ buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
+ let chunk = u64::from_le_bytes(buf);
+ self.decfifo.push(chunk, BYTES_TO_DIGITS[len]);
+ self.offset += len;
}
+ self.decfifo.pop3()
}
}
}
@@ -545,7 +552,7 @@ impl EncodedMsg<'_> {
}
self.push(&mut offset, (MODE_STOP, 4));
- let pad_offset = (offset + 7) / 8;
+ let pad_offset = offset.div_ceil(8);
for i in pad_offset..self.version.max_data() {
self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
}
@@ -607,8 +614,8 @@ struct EncodedMsgIterator<'a> {
impl Iterator for EncodedMsgIterator<'_> {
type Item = u8;
- // Send the bytes in interleaved mode, first byte of first block of group1,
- // then first byte of second block of group1, ...
+ /// Send the bytes in interleaved mode, first byte of first block of group1,
+ /// then first byte of second block of group1, ...
fn next(&mut self) -> Option<Self::Item> {
let em = self.em;
let blocks = em.g1_blocks + em.g2_blocks;
@@ -659,7 +666,7 @@ struct QrImage<'a> {
impl QrImage<'_> {
fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
let width = em.version.width();
- let stride = (width + 7) / 8;
+ let stride = width.div_ceil(8);
let data = qrdata;
let mut qr_image = QrImage {
@@ -676,7 +683,7 @@ impl QrImage<'_> {
self.data.fill(0);
}
- // Set pixel to light color.
+ /// Set pixel to light color.
fn set(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
let mut v = self.data[off];
@@ -684,13 +691,13 @@ impl QrImage<'_> {
self.data[off] = v;
}
- // Invert a module color.
+ /// Invert a module color.
fn xor(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
self.data[off] ^= 0x80 >> (x % 8);
}
- // Draw a light square at (x, y) top left corner.
+ /// Draw a light square at (x, y) top left corner.
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
for k in 0..size {
self.set(x + k, y);
@@ -822,7 +829,7 @@ impl QrImage<'_> {
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
}
- // Returns true if the module is reserved (Not usable for data and EC).
+ /// Returns true if the module is reserved (Not usable for data and EC).
fn is_reserved(&self, x: u8, y: u8) -> bool {
self.is_alignment(x, y)
|| self.is_finder(x, y)
@@ -831,13 +838,14 @@ impl QrImage<'_> {
|| self.is_version_info(x, y)
}
- // Last module to draw, at bottom left corner.
+ /// Last module to draw, at bottom left corner.
fn is_last(&self, x: u8, y: u8) -> bool {
x == 0 && y == self.width - 1
}
- // Move to the next module according to QR code order.
- // From bottom right corner, to bottom left corner.
+ /// Move to the next module according to QR code order.
+ ///
+ /// From bottom right corner, to bottom left corner.
fn next(&self, x: u8, y: u8) -> (u8, u8) {
let x_adj = if x <= 6 { x + 1 } else { x };
let column_type = (self.width - x_adj) % 4;
@@ -850,7 +858,7 @@ impl QrImage<'_> {
}
}
- // Find next module that can hold data.
+ /// Find next module that can hold data.
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
let (mut x, mut y) = self.next(x, y);
while self.is_reserved(x, y) && !self.is_last(x, y) {
@@ -879,7 +887,7 @@ impl QrImage<'_> {
}
}
- // Apply checkerboard mask to all non-reserved modules.
+ /// Apply checkerboard mask to all non-reserved modules.
fn apply_mask(&mut self) {
for x in 0..self.width {
for y in 0..self.width {
@@ -890,7 +898,7 @@ impl QrImage<'_> {
}
}
- // Draw the QR code with the provided data iterator.
+ /// Draw the QR code with the provided data iterator.
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
// First clear the table, as it may have already some data.
self.clear();
@@ -911,16 +919,16 @@ impl QrImage<'_> {
///
/// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-/// will be encoded as binary segment, otherwise it will be encoded
-/// efficiently as a numeric segment, and appended to the URL.
+/// will be encoded as binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-/// a V40 QR code. It will then be overwritten with the QR code image.
+/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-/// segments and ECC.
+/// segments and ECC.
/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-/// long for V40.
+/// long for V40.
///
/// # Safety
///
@@ -929,9 +937,9 @@ impl QrImage<'_> {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
///
/// They must remain valid for the duration of the function call.
-#[no_mangle]
+#[export]
pub unsafe extern "C" fn drm_panic_qr_generate(
- url: *const i8,
+ url: *const kernel::ffi::c_char,
data: *mut u8,
data_len: usize,
data_size: usize,
@@ -960,7 +968,7 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
// nul-terminated string.
let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
let segments = &[
- &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Binary(url_cstr.to_bytes()),
&Segment::Numeric(&data_slice[0..data_len]),
];
match EncodedMsg::new(segments, tmp_slice) {
@@ -980,8 +988,13 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
/// * If `url_len` > 0, remove the 2 segments header/length and also count the
/// conversion to numeric segments.
/// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
-#[no_mangle]
-pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+///
+/// # Safety
+///
+/// Always safe to call.
+// Required to be unsafe due to the `#[export]` annotation.
+#[export]
+pub unsafe extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
#[expect(clippy::manual_range_contains)]
if version < 1 || version > 40 {
return 0;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c585f1e8803e..cb0f68d7f8ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a28b22fdd7a4..ce76c55913f7 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -141,6 +142,14 @@
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*
+ * IN_FORMATS_ASYNC:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane for asynchronous flips. The blob is a struct
+ * drm_format_modifier_blob. Userspace cannot change this property. This is an
+ * optional property, and if it is not present then userspace should expect a
+ * failure in the atomic ioctl when the format/modifier is not supported by
+ * that plane for asynchronous flips.
+ *
* SIZE_HINTS:
* Blob property which contains the set of recommended plane size
* which can used for simple "cursor like" use cases (eg. no scaling).
@@ -185,9 +194,13 @@ modifiers_ptr(struct drm_format_modifier_blob *blob)
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
-static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+ struct drm_plane *plane,
+ bool (*format_mod_supported)
+ (struct drm_plane *plane,
+ u32 format,
+ u64 modifier))
{
- const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
@@ -197,7 +210,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
formats_size = sizeof(__u32) * plane->format_count;
if (WARN_ON(!formats_size)) {
/* 0 formats are never expected */
- return 0;
+ return ERR_PTR(-EINVAL);
}
modifiers_size =
@@ -213,7 +226,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return -1;
+ return blob;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -229,10 +242,10 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (!plane->funcs->format_mod_supported ||
- plane->funcs->format_mod_supported(plane,
- plane->format_types[j],
- plane->modifiers[i])) {
+ if (!format_mod_supported ||
+ format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
@@ -243,10 +256,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
- drm_object_attach_property(&plane->base, config->modifiers_property,
- blob->base.id);
-
- return 0;
+ return blob;
}
/**
@@ -358,6 +368,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
@@ -414,7 +425,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
- sizeof(format_modifiers[0]),
+ sizeof(*plane->modifiers),
GFP_KERNEL);
if (format_modifier_count && !plane->modifiers) {
@@ -469,8 +480,24 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_plane_create_hotspot_properties(plane);
}
- if (format_modifier_count)
- create_in_format_blob(dev, plane);
+ if (format_modifier_count) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->modifiers_property,
+ blob->base.id);
+ }
+
+ if (plane->funcs->format_mod_supported_async) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported_async);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->async_modifiers_property,
+ blob->base.id);
+ }
+
return 0;
}
@@ -1793,3 +1820,62 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_add_size_hints_property);
+
+/**
+ * drm_plane_create_color_pipeline_property - create a new color pipeline
+ * property
+ *
+ * @plane: drm plane
+ * @pipelines: list of pipelines
+ * @num_pipelines: number of pipelines
+ *
+ * Create the COLOR_PIPELINE plane property to specify color pipelines on
+ * the plane.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_create_color_pipeline_property(struct drm_plane *plane,
+ const struct drm_prop_enum_list *pipelines,
+ int num_pipelines)
+{
+ struct drm_prop_enum_list *all_pipelines;
+ struct drm_property *prop;
+ int len = 0;
+ int i;
+
+ all_pipelines = kcalloc(num_pipelines + 1,
+ sizeof(*all_pipelines),
+ GFP_KERNEL);
+
+ if (!all_pipelines) {
+ drm_err(plane->dev, "failed to allocate color pipeline\n");
+ return -ENOMEM;
+ }
+
+ /* Create default Bypass color pipeline */
+ all_pipelines[len].type = 0;
+ all_pipelines[len].name = "Bypass";
+ len++;
+
+ /* Add all other color pipelines */
+ for (i = 0; i < num_pipelines; i++, len++) {
+ all_pipelines[len].type = pipelines[i].type;
+ all_pipelines[len].name = pipelines[i].name;
+ }
+
+ prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC,
+ "COLOR_PIPELINE",
+ all_pipelines, len);
+	if (!prop) {
+		kfree(all_pipelines);
+		return -ENOMEM;
+ }
+
+ drm_object_attach_property(&plane->base, prop, 0);
+ plane->color_pipeline_property = prop;
+
+ kfree(all_pipelines);
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_color_pipeline_property);
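As a rough illustration of the IN_FORMATS_ASYNC documentation above, a driver that only supports linear buffers for asynchronous flips might wire up the new format_mod_supported_async hook roughly as follows; the foo_* names are hypothetical and the remaining plane funcs are elided.

/* Hypothetical driver sketch: restrict async flips to linear buffers. */
static bool foo_plane_format_mod_supported_async(struct drm_plane *plane,
						 u32 format, u64 modifier)
{
	/* This (imaginary) hardware can only async-flip linear scanout. */
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static const struct drm_plane_funcs foo_plane_funcs = {
	/* ... the usual plane funcs elided ... */
	.format_mod_supported_async = foo_plane_format_mod_supported_async,
};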
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7982be4b0306..747d248aaf02 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -23,6 +23,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 32a8781cfd67..21809a82187b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -37,6 +37,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "drm_internal.h"
@@ -93,7 +94,7 @@ struct drm_prime_member {
struct rb_node handle_rb;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -190,8 +191,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
{
struct rb_node *rb;
- mutex_lock(&prime_fpriv->lock);
-
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
@@ -210,8 +209,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
rb = rb->rb_left;
}
}
-
- mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
@@ -605,6 +602,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
/*
* drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
@@ -614,7 +612,16 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
!obj->funcs->get_sg_table)
return -ENOSYS;
- return drm_gem_pin(obj);
+ if (!obj->funcs->pin)
+ return 0;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = obj->funcs->pin(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -631,8 +638,16 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
+
+ if (!obj->funcs->unpin)
+ return;
- drm_gem_unpin(obj);
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (drm_WARN_ON(obj->dev, ret))
+ return;
+ obj->funcs->unpin(obj);
+ dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -713,7 +728,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -729,7 +744,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -810,7 +825,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -917,6 +931,26 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
+ * drm_gem_is_prime_exported_dma_buf -
+ * checks if the DMA-BUF was exported from a GEM object belonging to @dev.
+ * @dev: drm_device to check against
+ * @dma_buf: dma-buf object to import
+ *
+ * Return: true if the DMA-BUF was exported from a GEM object belonging
+ * to @dev, false otherwise.
+ */
+
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
+}
+EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);
+
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
@@ -940,16 +974,14 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
- if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
obj = dma_buf->priv;
- if (obj->dev == dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_get(obj);
- return obj;
- }
+ drm_gem_object_get(obj);
+ return obj;
}
if (!dev->driver->gem_prime_import_sg_table)
@@ -1004,7 +1036,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
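A minimal sketch of how a driver-specific import path might use the new drm_gem_is_prime_exported_dma_buf() helper, mirroring the self-import shortcut above; foo_gem_prime_import is a hypothetical driver callback.

static struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/* Re-importing our own buffer: just take a GEM reference. */
		struct drm_gem_object *obj = dma_buf->priv;

		drm_gem_object_get(obj);
		return obj;
	}

	/* Fall back to the generic import path for foreign buffers. */
	return drm_gem_prime_import(dev, dma_buf);
}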
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 79517bd4418f..ded9461df5f2 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 6cc39e30781f..8959f7084e0b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 96b266b37ba4..09b12c30df69 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -119,6 +119,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
*status = drm_bridge_chain_mode_valid(bridge,
&connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
@@ -202,7 +203,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
@@ -338,10 +339,23 @@ void drm_kms_helper_poll_reschedule(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
+static int detect_connector_status(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
+ if (funcs->detect_ctx)
+ return funcs->detect_ctx(connector, ctx, force);
+ else if (connector->funcs->detect)
+ return connector->funcs->detect(connector, force);
+
+ return connector_status_connected;
+}
+
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -349,14 +363,8 @@ drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
retry:
ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
- if (!ret) {
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, &ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
- }
+ if (!ret)
+ ret = detect_connector_status(connector, &ctx, force);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -390,7 +398,6 @@ drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_device *dev = connector->dev;
int ret;
@@ -401,12 +408,7 @@ drm_helper_probe_detect(struct drm_connector *connector,
if (ret)
return ret;
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
+ ret = detect_connector_status(connector, ctx, force);
if (ret != connector->status)
connector->epoch_counter += 1;
@@ -957,15 +959,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index dd33fec5aabd..c0948586b7fd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -7,6 +7,7 @@
*/
#include <linux/average.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 250819fbc5ce..fcbcaaa36b5f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,6 +3,7 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
index 38cc7a123819..879ea33dbbc4 100644
--- a/drivers/gpu/drm/drm_suballoc.c
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -42,6 +42,8 @@
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..e1b0fa4000cd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -195,6 +195,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
@@ -741,7 +742,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +756,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +783,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +880,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +890,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +912,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +922,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
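A hedged userspace sketch of exporting a timeline point as a sync_file with the new flag; it assumes the matching uapi change adds a 'point' member to struct drm_syncobj_handle, which is not shown in this hunk.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/drm.h>

/* Hypothetical helper: export syncobj timeline point @point as a sync_file fd. */
static int export_timeline_point(int fd, uint32_t handle, uint64_t point,
				 int *sync_file_fd)
{
	struct drm_syncobj_handle args = {
		.handle = handle,
		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
			 DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
		.point = point,	/* assumed new uapi field */
	};
	int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);

	if (ret == 0)
		*sync_file_fd = args.fd;
	return ret;
}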
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fb3bbb6adcd1..b01ffa4d6509 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -18,6 +18,7 @@
#include <linux/gfp.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
+#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -30,6 +31,8 @@
#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
+#include <asm/video.h>
+
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -261,7 +264,7 @@ static ssize_t enabled_show(struct device *device,
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *connector_dev = kobj_to_dev(kobj);
@@ -315,14 +318,14 @@ static struct attribute *connector_dev_attrs[] = {
NULL
};
-static struct bin_attribute edid_attr = {
+static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 0,
.read = edid_show,
};
-static struct bin_attribute *connector_bin_attrs[] = {
+static const struct bin_attribute *const connector_bin_attrs[] = {
&edid_attr,
NULL
};
@@ -508,6 +511,43 @@ void drm_sysfs_connector_property_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_property_event);
+static ssize_t boot_display_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+static DEVICE_ATTR_RO(boot_display);
+
+static struct attribute *display_attrs[] = {
+ &dev_attr_boot_display.attr,
+ NULL
+};
+
+static umode_t boot_display_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj)->parent;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (video_is_primary_device(&pdev->dev))
+ return a->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group display_attr_group = {
+ .attrs = display_attrs,
+ .is_visible = boot_display_visible,
+};
+
+static const struct attribute_group *card_dev_groups[] = {
+ &display_attr_group,
+ NULL
+};
+
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
@@ -531,6 +571,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
kdev->devt = MKDEV(DRM_MAJOR, minor->index);
kdev->class = drm_class;
+ kdev->groups = card_dev_groups;
kdev->type = &drm_sysfs_device_minor;
}
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 94e45ed6869d..5c14140cd0c2 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -136,8 +136,17 @@
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
*
- * Drivers for hardware without support for vertical-blanking interrupts
- * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * Drivers for hardware without support for vertical-blanking interrupts can
+ * use DRM vblank timers to send vblank events at the rate of the current
+ * display mode's refresh. While not synchronized to the hardware's
+ * vertical-blanking regions, the timer helps DRM clients and compositors to
+ * adapt their update cycle to the display output. Drivers should set up
+ * vblanking as usual, but call drm_crtc_vblank_start_timer() and
+ * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting.
+ * See also DRM vblank helpers for more information.
+ *
+ * Drivers without support for either vertical-blanking interrupts or timers must
+ * not call drm_vblank_init(). For these drivers, atomic helpers will
* automatically generate fake vblank events as part of the display update.
* This functionality also can be controlled by the driver by enabling and
* disabling struct drm_crtc_state.no_vblank.
@@ -487,7 +496,8 @@ out:
static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
+ struct drm_vblank_crtc *vblank = timer_container_of(vblank, t,
+ disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
@@ -507,8 +517,11 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
drm_core_check_feature(dev, DRIVER_MODESET));
+ if (vblank->vblank_timer.crtc)
+ hrtimer_cancel(&vblank->vblank_timer.timer);
+
drm_vblank_destroy_worker(vblank);
- del_timer_sync(&vblank->disable_timer);
+ timer_delete_sync(&vblank->disable_timer);
}
/**
@@ -793,10 +806,8 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
ts_vblank_time = ktime_to_timespec64(*vblank_time);
drm_dbg_vbl(dev,
- "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
- pipe, hpos, vpos,
- (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
- (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+ "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n",
+ pipe, hpos, vpos, &ts_etime, &ts_vblank_time,
duration_ns / 1000, i);
return true;
@@ -1302,7 +1313,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
ret = wait_event_timeout(vblank->queue,
last != drm_vblank_count(dev, pipe),
- msecs_to_jiffies(100));
+ msecs_to_jiffies(1000));
drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe);
@@ -2161,3 +2172,159 @@ err_free:
return ret;
}
+/*
+ * VBLANK timer
+ */
+
+static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer)
+{
+ struct drm_vblank_crtc_timer *vtimer =
+ container_of(timer, struct drm_vblank_crtc_timer, timer);
+ struct drm_crtc *crtc = vtimer->crtc;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ ktime_t interval;
+ u64 ret_overrun;
+ bool succ;
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ interval = vtimer->interval;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ if (!interval)
+ return HRTIMER_NORESTART;
+
+ ret_overrun = hrtimer_forward_now(&vtimer->timer, interval);
+ if (ret_overrun != 1)
+ drm_dbg_vbl(dev, "vblank timer overrun\n");
+
+ if (crtc_funcs->handle_vblank_timeout)
+ succ = crtc_funcs->handle_vblank_timeout(crtc);
+ else
+ succ = drm_crtc_handle_vblank(crtc);
+ if (!succ)
+ return HRTIMER_NORESTART;
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's enable_vblank
+ * function to start a vblank timer. The timer will fire after the duration
+ * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ if (!vtimer->crtc) {
+ /*
+ * Set up the data structures on the first invocation.
+ */
+ vtimer->crtc = crtc;
+ spin_lock_init(&vtimer->interval_lock);
+ hrtimer_setup(&vtimer->timer, drm_vblank_timer_function,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ } else {
+ /*
+ * Timer should not be active. If it is, wait for the
+ * previous cancel operations to finish.
+ */
+ while (hrtimer_active(&vtimer->timer))
+ hrtimer_try_to_cancel(&vtimer->timer);
+ }
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = ns_to_ktime(vblank->framedur_ns);
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_start_timer);
+
+/**
+ * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's disable_vblank
+ * function to stop a vblank timer.
+ */
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ /*
+ * Calling hrtimer_cancel() can result in a deadlock with DRM's
+	 * vblank_time_lock and hrtimers' softirq_expiry_lock. So
+ * clear interval and indicate cancellation. The timer function
+ * will cancel itself on the next invocation.
+ */
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = 0;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_try_to_cancel(&vtimer->timer);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer);
+
+/**
+ * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout
+ * @crtc: The CRTC
+ * @vblank_time: Returns the next vblank timestamp
+ *
+ * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank
+ * timestamp of the CRTC's vblank timer according to the timer's expiry
+ * time.
+ */
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ u64 cur_count;
+ ktime_t cur_time;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return;
+ }
+
+ /*
+ * A concurrent vblank timeout could update the expires field before
+	 * we compare it with the vblank time. We would then compare the old
+	 * expiry time with the new vblank time and wrongly deduce that the timer
+	 * had already expired. Reread until we get consistent values from both fields.
+ */
+ do {
+ cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time);
+ *vblank_time = READ_ONCE(vtimer->timer.node.expires);
+ } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time));
+
+ if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time)))
+ return; /* Already expired */
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt
+ * is only generated after all the vblank registers are updated)
+ * and what the vblank core expects. Therefore we need to always
+ * correct the timestamp by one frame.
+ */
+ *vblank_time = ktime_sub(*vblank_time, vtimer->interval);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
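A sketch of how a driver without vblank interrupts might wire the new timer API into its drm_crtc_funcs vblank hooks, as the documentation above suggests; the foo_* names are hypothetical.

static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
{
	/* Starts (or restarts) the per-CRTC vblank timer. */
	return drm_crtc_vblank_start_timer(crtc);
}

static void foo_crtc_disable_vblank(struct drm_crtc *crtc)
{
	/* Lazily cancels the timer; it stops on its next expiry. */
	drm_crtc_vblank_cancel_timer(crtc);
}

static bool foo_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
					  ktime_t *vblank_time, bool in_vblank_irq)
{
	/* Derive the timestamp from the timer's expiry time. */
	drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
	return true;
}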
diff --git a/drivers/gpu/drm/drm_vblank_helper.c b/drivers/gpu/drm/drm_vblank_helper.c
new file mode 100644
index 000000000000..a04a6ba1b0ca
--- /dev/null
+++ b/drivers/gpu/drm/drm_vblank_helper.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The vblank helper library provides functions for supporting vertical
+ * blanking in DRM drivers.
+ *
+ * For vblank timers, several callback implementations are available.
+ * Drivers enable support for vblank timers by setting the vblank callbacks
+ * in struct &drm_crtc_funcs to the helpers provided by this library. The
+ * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently.
+ * The driver further has to send the VBLANK event from its atomic_flush
+ * callback and control vblank from the CRTC's atomic_enable and atomic_disable
+ * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs.
+ * The vblank helper library provides implementations of these callbacks
+ * for drivers without further requirements. The initializer macro
+ * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently.
+ *
+ * Once the driver enables vblank support with drm_vblank_init(), each
+ * CRTC's vblank timer fires according to the programmed display mode. By
+ * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with
+ * more specific requirements can set their own handler function in
+ * struct &drm_crtc_helper_funcs.handle_vblank_timeout.
+ */
+
+/*
+ * VBLANK helpers
+ */
+
+/**
+ * drm_crtc_vblank_atomic_flush -
+ * Implements struct &drm_crtc_helper_funcs.atomic_flush
+ * @crtc: The CRTC
+ * @state: The atomic state to apply
+ *
+ * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of
+ * struct drm_crtc_helper_funcs for CRTCs that only need to send out a
+ * VBLANK event.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_flush.
+ */
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush);
+
+/**
+ * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_enable.
+ */
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable);
+
+/**
+ * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_disable.
+ */
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable);
+
+/*
+ * VBLANK timer
+ */
+
+/**
+ * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements
+ * enable_vblank of struct drm_crtc_helper_funcs for CRTCs that require
+ * a VBLANK timer. It sets up the timer on the first invocation. The
+ * started timer expires after the current frame duration. See struct
+ * &drm_vblank_crtc.framedur_ns.
+ *
+ * See also struct &drm_crtc_funcs.enable_vblank.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc)
+{
+ return drm_crtc_vblank_start_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements
+ * disable_vblank of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer.
+ *
+ * See also struct &drm_crtc_funcs.disable_vblank.
+ */
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc)
+{
+ drm_crtc_vblank_cancel_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer -
+ * Implements struct &drm_crtc_funcs.get_vblank_timestamp
+ * @crtc: The CRTC
+ * @max_error: Maximum acceptable error
+ * @vblank_time: Returns the next vblank timestamp
+ * @in_vblank_irq: True if called from drm_crtc_handle_vblank()
+ *
+ * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer() implements
+ * get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer. It returns the timestamp according to the timer's expiry
+ * time.
+ *
+ * See also struct &drm_crtc_funcs.get_vblank_timestamp.
+ *
+ * Returns:
+ * True on success, or false otherwise.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer);
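Under the assumption that DRM_CRTC_VBLANK_TIMER_FUNCS and DRM_CRTC_HELPER_VBLANK_FUNCS expand to the helper callbacks documented above, a timer-based CRTC could be hooked up roughly like this; the foo_* names are hypothetical and unrelated callbacks are elided.

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... reset/destroy/state handling elided ... */
	DRM_CRTC_VBLANK_TIMER_FUNCS,	/* timer-backed vblank callbacks */
};

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	/* ... atomic_check and friends elided ... */
	DRM_CRTC_HELPER_VBLANK_FUNCS,	/* vblank event on flush, on/off in enable/disable */
};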
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 1752ffb44e1d..70f0199251ea 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -2,6 +2,8 @@
#include <uapi/linux/sched/types.h>
+#include <linux/export.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
@@ -242,7 +244,7 @@ EXPORT_SYMBOL(drm_vblank_work_flush);
void drm_vblank_work_flush_all(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
spin_lock_irq(&dev->event_lock);
wait_event_lock_irq(vblank->work_wait_queue,
@@ -277,7 +279,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
- worker = kthread_create_worker(0, "card%d-crtc%d",
+ worker = kthread_run_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 83229a031af0..58659c16874c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,6 +23,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 33a3c98a962d..95b8a2e4bda6 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -10,11 +10,13 @@
*/
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
@@ -195,14 +197,29 @@ int drm_writeback_connector_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+static void delete_writeback_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.writeback_pixel_formats_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_pixel_formats_property);
+ dev->mode_config.writeback_pixel_formats_property = NULL;
+ }
+ if (dev->mode_config.writeback_out_fence_ptr_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_out_fence_ptr_property);
+ dev->mode_config.writeback_out_fence_ptr_property = NULL;
+ }
+ if (dev->mode_config.writeback_fb_id_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_fb_id_property);
+ dev->mode_config.writeback_fb_id_property = NULL;
+ }
+}
+
/**
- * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * __drm_writeback_connector_init - Initialize a writeback connector with
* a custom encoder
*
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @enc: handle to the already initialized drm encoder
- * @con_funcs: Connector funcs vtable
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
@@ -218,41 +235,33 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* assigning the encoder helper functions, possible_crtcs and any other encoder
* specific operation.
*
- * Drivers should always use this function instead of drm_connector_init() to
- * set up writeback connectors if they want to manage themselves the lifetime of the
- * associated encoder.
- *
* Returns: 0 on success, or a negative error code
*/
-int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
- struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
- const struct drm_connector_funcs *con_funcs, const u32 *formats,
- int n_formats)
+static int __drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc, const u32 *formats,
+ int n_formats)
{
- struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
int ret = create_writeback_properties(dev);
if (ret != 0)
- return ret;
-
- blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
- formats);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
+ goto failed_properties;
connector->interlace_allowed = 0;
- ret = drm_connector_init(dev, connector, con_funcs,
- DRM_MODE_CONNECTOR_WRITEBACK);
- if (ret)
- goto connector_fail;
-
ret = drm_connector_attach_encoder(connector, enc);
if (ret)
- goto attach_fail;
+ goto failed_properties;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto failed_properties;
+ }
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
@@ -275,15 +284,138 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
+failed_properties:
+ delete_writeback_properties(dev);
+ return ret;
+}
+
+/**
+ * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @enc: handle to the already initialized drm encoder
+ * @con_funcs: Connector funcs vtable
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * This function assumes that the drm_writeback_connector's encoder has already been
+ * created and initialized before invoking this function.
+ *
+ * In addition, this function also assumes that callers of this API will manage
+ * assigning the encoder helper functions, possible_crtcs and any other encoder
+ * specific operation.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors if they want to manage themselves the lifetime of the
+ * associated encoder.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ drm_connector_cleanup(connector);
-attach_fail:
- drm_connector_cleanup(connector);
-connector_fail:
- drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
+/**
+ * drm_writeback_connector_cleanup - Cleanup the writeback connector
+ * @dev: DRM device
+ * @data: Pointer to the writeback connector to clean up
+ *
+ * This drops the reference on the pixel-format blob and destroys the writeback
+ * properties. It also cleans up any jobs remaining in this writeback connector's
+ * queue. Caution: this helper does not clean up the attached encoder or the drm_connector.
+ */
+static void drm_writeback_connector_cleanup(struct drm_device *dev,
+ void *data)
+{
+ unsigned long flags;
+ struct drm_writeback_job *pos, *n;
+ struct drm_writeback_connector *wb_connector = data;
+
+ delete_writeback_properties(dev);
+ drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_for_each_entry_safe(pos, n, &wb_connector->job_queue, list_entry) {
+ list_del(&pos->list_entry);
+ drm_writeback_cleanup_job(pos);
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+
+/**
+ * drmm_writeback_connector_init - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc: Encoder to connect this writeback connector
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function initializes a writeback connector and registers its cleanup.
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drmm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK, NULL);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, drm_writeback_connector_cleanup,
+ wb_connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_writeback_connector_init);
+
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
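A minimal sketch of the managed writeback setup with the new drmm_writeback_connector_init(); struct foo_device and foo_wb_connector_funcs are hypothetical, and the encoder is assumed to be initialized by the driver beforehand as the documentation requires.

static int foo_writeback_init(struct drm_device *dev, struct foo_device *fdev)
{
	static const u32 wb_formats[] = { DRM_FORMAT_XRGB8888 };
	int ret;

	/* Managed encoder; no custom funcs needed for this sketch. */
	ret = drmm_encoder_init(dev, &fdev->wb_encoder, NULL,
				DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret)
		return ret;
	fdev->wb_encoder.possible_crtcs = 1;

	/* foo_wb_connector_funcs: driver connector funcs, defined elsewhere. */
	return drmm_writeback_connector_init(dev, &fdev->wb_connector,
					     &foo_wb_connector_funcs,
					     &fdev->wb_encoder,
					     wb_formats, ARRAY_SIZE(wb_formats));
}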
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index b13a17276d07..ad5e6f7b23f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
@@ -347,7 +348,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
- unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 7aa5f14d0c87..3a221923f15d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -9,7 +9,6 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
@@ -100,7 +99,7 @@ retry:
mutex_unlock(&suballoc->lock);
ret = wait_event_interruptible_timeout(suballoc->free_event,
suballoc->free_space,
- msecs_to_jiffies(10 * 1000));
+ secs_to_jiffies(10));
if (!ret) {
dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 00707001d112..54ceae87b401 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -488,7 +489,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -498,6 +508,7 @@ static const struct drm_driver etnaviv_drm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
+ .show_fdinfo = etnaviv_show_fdinfo,
.ioctls = etnaviv_ioctls,
.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
.fops = &fops,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 16473c371444..5d8f3b03d4ae 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
@@ -342,6 +343,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
struct page **pages;
+ pgprot_t prot;
lockdep_assert_held(&obj->lock);
@@ -349,8 +351,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
if (IS_ERR(pages))
return NULL;
- return vmap(pages, obj->base.size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ switch (obj->flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_CACHED:
+ prot = PAGE_KERNEL;
+ break;
+ case ETNA_BO_UNCACHED:
+ prot = pgprot_noncached(PAGE_KERNEL);
+ break;
+ case ETNA_BO_WC:
+ default:
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
@@ -528,6 +541,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (etnaviv_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
@@ -541,6 +565,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.mmap = etnaviv_gem_mmap,
+ .status = etnaviv_gem_status,
.vm_ops = &vm_ops,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 687555aae807..e5ee82a0674c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -44,9 +44,7 @@ struct etnaviv_gem_object {
u32 flags;
struct list_head gem_node;
- struct etnaviv_gpu *gpu; /* non-null if active */
atomic_t gpu_active;
- u32 access;
struct page **pages;
struct sg_table *sgt;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 42e57d142554..40a50c60dfff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3c0a5c3e0e3d..a9611c1a773f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/dma-resv.h>
@@ -534,7 +535,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- 1, submit->ctx);
+ 1, submit->ctx, file->client_id);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 2d4c112ce033..ca0be293f5fe 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -13,8 +13,11 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/thermal.h>
+#include <drm/drm_print.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
@@ -172,6 +175,29 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
return 0;
}
+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ /*
+ * 32 core clock cycles (slowest clock) required before deassertion
+	 * 1 microsecond should cover all implementations without computing the exact delay
+ */
+ usleep_range(1, 2);
+
+ ret = reset_control_deassert(gpu->rst);
+ if (ret)
+ return ret;
+
+ /*
+ * 128 core clock cycles (slowest clock) required before any activity on AHB
+	 * 1 microsecond should cover all implementations without computing the exact delay
+ */
+ usleep_range(1, 2);
+
+ return 0;
+}
+
static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
{
return gpu->identity.model == model &&
@@ -799,6 +825,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto pm_put;
}
+ ret = etnaviv_gpu_reset_deassert(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset deassert failed\n");
+ goto fail;
+ }
+
etnaviv_hw_identify(gpu);
if (gpu->identity.model == 0) {
@@ -1860,6 +1892,17 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
+
+ /* Get Reset: */
+ gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(gpu->rst))
+ return dev_err_probe(dev, PTR_ERR(gpu->rst),
+ "failed to get reset\n");
+
+ err = reset_control_assert(gpu->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to assert reset\n");
+
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 4d8a7d48ade3..5cb46c84e03a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -93,6 +93,7 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
+struct reset_control;
#define ETNA_NR_EVENTS 30
@@ -158,6 +159,7 @@ struct etnaviv_gpu {
struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
+ struct reset_control *rst;
unsigned int freq_scale;
unsigned int fe_waitcycles;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 8665f2658d51..32d710baf17f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -198,6 +198,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
},
{
.model = 0x8000,
+ .revision = 0x6205,
+ .product_id = 0x80003,
+ .customer_id = 0x15,
+ .eco_id = 0,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 512,
+ .shader_core_count = 2,
+ .nn_core_count = 2,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287c8d,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0x7b5ac333,
+ .minor_features6 = 0xfcce6000,
+ .minor_features7 = 0x03fbfa6f,
+ .minor_features8 = 0x00ef0ef0,
+ .minor_features9 = 0x0eca703c,
+ .minor_features10 = 0x898048f0,
+ .minor_features11 = 0x00000034,
+ },
+ {
+ .model = 0x8000,
.revision = 0x7120,
.product_id = 0x45080009,
.customer_id = 0x88,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 7e065b3723cf..a992be2ede88 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -6,6 +6,8 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <drm/drm_print.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -19,12 +21,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
- if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
- iova, size, pgsize);
- return;
- }
-
while (unmapped < size) {
unmapped_page = context->global->ops->unmap(context, iova,
pgsize);
@@ -45,12 +41,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
size_t orig_size = size;
int ret = 0;
- if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
- iova, &paddr, size, pgsize);
- return -EINVAL;
- }
-
while (size) {
ret = context->global->ops->map(context, iova, paddr, pgsize,
prot);
@@ -82,11 +72,19 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
return -EINVAL;
for_each_sgtable_dma_sg(sgt, sg, i) {
- phys_addr_t pa = sg_dma_address(sg) - sg->offset;
- unsigned int da_len = sg_dma_len(sg) + sg->offset;
+ phys_addr_t pa = sg_dma_address(sg);
+ unsigned int da_len = sg_dma_len(sg);
unsigned int bytes = min_t(unsigned int, da_len, va_len);
- VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
+ VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 5b67eda122db..df4232d7e135 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -40,11 +40,11 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
int change;
/*
- * If the GPU managed to complete this jobs fence, the timout is
- * spurious. Bail out.
+	 * If the GPU managed to complete this job's fence, the timeout has
+	 * fired before the free-job worker. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* If the GPU is still making forward progress on the front-end (which
@@ -70,7 +70,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
gpu->hangcheck_dma_addr = dma_addr;
gpu->hangcheck_primid = primid;
gpu->hangcheck_fence = gpu->completed_fence;
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
/* block scheduler */
@@ -86,11 +86,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
drm_sched_start(&gpu->sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-
-out_no_timeout:
- list_add(&sched_job->list, &sched_job->sched->pending_list);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
@@ -144,17 +140,17 @@ out_unlock:
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
- int ret;
-
- ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
- msecs_to_jiffies(500), NULL, NULL,
- dev_name(gpu->dev), gpu->dev);
- if (ret)
- return ret;
-
- return 0;
+ const struct drm_sched_init_args args = {
+ .ops = &etnaviv_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = etnaviv_hw_jobs_limit,
+ .hang_limit = etnaviv_job_hang_limit,
+ .timeout = msecs_to_jiffies(500),
+ .name = dev_name(gpu->dev),
+ .dev = gpu->dev,
+ };
+
+ return drm_sched_init(&gpu->sched, &args);
}
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
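etnaviv_sched_init() above moves from drm_sched_init()'s long positional argument list to a single struct drm_sched_init_args filled with designated initializers. The standalone sketch below shows the pattern with invented names (sched_init_args/sched_init are not the DRM scheduler API): members that are left out stay zero-initialized, which is what lets new parameters be added later without touching every caller.

#include <stdio.h>

struct sched_init_args {
	const char *name;
	unsigned int credit_limit;
	unsigned int hang_limit;
	unsigned long timeout;	/* jiffies in the real code, a plain number here */
};

static int sched_init(const struct sched_init_args *args)
{
	printf("init %s: credits=%u hang_limit=%u timeout=%lu\n",
	       args->name, args->credit_limit, args->hang_limit, args->timeout);
	return 0;
}

int main(void)
{
	const struct sched_init_args args = {
		.name = "etnaviv-demo",
		.credit_limit = 4,
		.hang_limit = 1,
		.timeout = 500,
	};

	return sched_init(&args);
}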
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b9e206303b48..9ae0fa4667a9 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -20,6 +20,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 5170f72b0830..bb74b17f9753 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -20,6 +20,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -43,13 +44,13 @@ struct decon_data {
unsigned int wincon_burstlen_shift;
};
-static struct decon_data exynos7_decon_data = {
+static const struct decon_data exynos7_decon_data = {
.vidw_buf_start_base = 0x80,
.shadowcon_win_protect_shift = 10,
.wincon_burstlen_shift = 11,
};
-static struct decon_data exynos7870_decon_data = {
+static const struct decon_data exynos7870_decon_data = {
.vidw_buf_start_base = 0x880,
.shadowcon_win_protect_shift = 8,
.wincon_burstlen_shift = 10,
@@ -69,7 +70,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
- bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -132,9 +132,6 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
static void decon_wait_for_vblank(struct decon_context *ctx)
{
- if (ctx->suspended)
- return;
-
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -210,9 +207,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
- if (ctx->suspended)
- return;
-
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -274,9 +268,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return -EPERM;
-
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -299,9 +290,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return;
-
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -404,9 +392,6 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -427,9 +412,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int pitch = fb->pitches[0];
unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base;
- if (ctx->suspended)
- return;
-
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -517,9 +499,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
- if (ctx->suspended)
- return;
-
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -538,9 +517,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -568,9 +544,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
- if (!ctx->suspended)
- return;
-
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -584,8 +557,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -593,9 +564,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -605,8 +573,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
-
- ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -636,6 +602,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (!ctx->drm_dev)
goto out;
+ /* check if crtc and vblank have been initialized properly */
+ if (!drm_dev_has_vblank(ctx->drm_dev))
+ goto out;
+
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);
@@ -723,7 +693,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
- ctx->suspended = true;
ctx->data = of_device_get_match_data(dev);
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index f313ae7bc3a3..6cc7bf77bcac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -355,8 +355,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- if (drm)
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver exynos_drm_platform_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 896a03639e2d..c4d098ab7863 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -154,6 +154,11 @@ static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
.host_ops = &exynos_dsi_exynos_host_ops,
};
+static const struct samsung_dsim_plat_data exynos7870_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS7870,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
+
static const struct of_device_id exynos_dsi_of_match[] = {
{
.compatible = "samsung,exynos3250-mipi-dsi",
@@ -175,6 +180,10 @@ static const struct of_device_id exynos_dsi_of_match[] = {
.compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_pdata,
},
+ {
+ .compatible = "samsung,exynos7870-mipi-dsi",
+ .data = &exynos7870_dsi_pdata,
+ },
{ /* sentinel. */ }
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index fc1c5608db96..6ecd95bcb0c4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -14,6 +14,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
@@ -56,6 +57,7 @@ static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count)
@@ -76,7 +78,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
@@ -94,9 +96,9 @@ err:
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_framebuffer *fb;
int i;
@@ -124,7 +126,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
+ fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 2f841bbdddc5..fdc6cb40cc9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,6 +14,7 @@
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 9526a25e90ac..637927818dfe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,6 +16,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -42,8 +43,6 @@ static void exynos_drm_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops exynos_drm_fb_ops = {
@@ -59,18 +58,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
struct exynos_drm_gem *exynos_gem)
{
- struct fb_info *fbi;
+ struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned long offset;
- fbi = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(fbi)) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev),
- "failed to allocate fb info.\n");
- return PTR_ERR(fbi);
- }
-
fbi->fbops = &exynos_drm_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);
@@ -116,7 +108,10 @@ int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(exynos_gem);
helper->fb =
- exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
+ exynos_drm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index b150cfd92f6e..09e33a26caaf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -908,7 +908,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 buf_num;
u32 cfg;
- DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueue[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1ad87584b1c2..b6abdc4f2b0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -23,6 +23,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -187,6 +188,7 @@ struct fimd_context {
u32 i80ifcon;
bool i80_if;
bool suspended;
+ bool dp_clk_enabled;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
atomic_t win_updated;
@@ -731,7 +733,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
/*
* Setting dma-burst to 16Word causes permanent tearing for very small
* buffers, e.g. cursor buffer. Burst Mode switching which based on
- * plane size is not recommended as plane size varies alot towards the
+ * plane size is not recommended as plane size varies a lot towards the
* end of the screen and rapid movement causes unstable DMA, but it is
* still better to change dma-burst than displaying garbage.
*/
@@ -1047,7 +1049,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
struct fimd_context *ctx = container_of(clk, struct fimd_context,
dp_clk);
u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+
+ if (enable == ctx->dp_clk_enabled)
+ return;
+
+ if (enable)
+ pm_runtime_resume_and_get(ctx->dev);
+
+ ctx->dp_clk_enabled = enable;
writel(val, ctx->regs + DP_MIE_CLKCON);
+
+ if (!enable)
+ pm_runtime_put(ctx->dev);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
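fimd_dp_clock_enable() above now records its state in dp_clk_enabled and brackets the DP_MIE_CLKCON write with a runtime-PM get/put pair, so repeated enable or disable requests neither touch the register while the block is powered down nor unbalance the PM reference count. The userspace sketch below mirrors that guard; pm_get()/pm_put() are stand-ins for pm_runtime_resume_and_get()/pm_runtime_put() and exist only for this demo.

#include <stdbool.h>
#include <stdio.h>

static int pm_refcount;
static bool clk_enabled;

static void pm_get(void) { pm_refcount++; }
static void pm_put(void) { pm_refcount--; }

static void dp_clock_enable(bool enable)
{
	if (enable == clk_enabled)	/* ignore redundant requests */
		return;

	if (enable)
		pm_get();		/* power up before touching the register */

	clk_enabled = enable;
	printf("write DP_MIE_CLKCON=%d (pm refcount %d)\n", enable, pm_refcount);

	if (!enable)
		pm_put();		/* drop the reference only after the write */
}

int main(void)
{
	dp_clock_enable(true);
	dp_clock_enable(true);	/* no second get */
	dp_clock_enable(false);
	printf("final pm refcount: %d\n", pm_refcount);
	return 0;
}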
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index d32f2474cbaa..2bea107dd960 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4787fee4696f..b9b2f000072d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -7,11 +7,12 @@
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>
@@ -174,7 +175,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
+ DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %p\n", obj->filp);
return exynos_gem;
}
@@ -330,15 +331,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
unsigned int flags;
int ret;
+ ret = drm_mode_size_dumb(dev, args, 0, 0);
+ if (ret)
+ return ret;
+
/*
* allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
if (is_drm_iommu_supported(dev))
flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
else
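In exynos_drm_gem_dumb_create() above the open-coded pitch/size arithmetic is replaced by a call to drm_mode_size_dumb(). The sketch below restates the removed formula (pitch = width * bytes per pixel rounded up, size = pitch * height) with a basic overflow check; it only illustrates what such sizing has to compute and is not the helper's actual implementation.

#include <stdint.h>
#include <stdio.h>

static int size_dumb(uint32_t width, uint32_t height, uint32_t bpp,
		     uint32_t *pitch, uint64_t *size)
{
	uint64_t p = (uint64_t)width * ((bpp + 7) / 8);	/* bytes per scanline */

	if (p > UINT32_MAX)
		return -1;	/* pitch would not fit the u32 field */

	*pitch = (uint32_t)p;
	*size = p * height;	/* total allocation for the dumb buffer */
	return 0;
}

int main(void)
{
	uint32_t pitch;
	uint64_t size;

	if (!size_dumb(1920, 1080, 32, &pitch, &size))
		printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
	return 0;
}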
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index ea9f66037600..008def51225a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -271,7 +272,7 @@ static inline struct exynos_drm_ipp_task *
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %p\n", task);
return task;
}
@@ -339,7 +340,7 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
}
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Got task %pK configuration from userspace\n",
+ "Got task %p configuration from userspace\n",
task);
return 0;
}
@@ -394,7 +395,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
- DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %p\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
@@ -559,7 +560,7 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: %s format not supported\n",
+ "Task %p: %s format not supported\n",
task, buf == src ? "src" : "dst");
return -EINVAL;
}
@@ -609,7 +610,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %p\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
@@ -625,7 +626,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: defined area is outside provided buffers\n",
+ "Task %p: defined area is outside provided buffers\n",
task);
return -EINVAL;
}
@@ -642,7 +643,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
- DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: hw capabilities exceeded\n",
task);
return -EINVAL;
}
@@ -655,7 +656,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
if (ret)
return ret;
- DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %p: all checks done.\n",
task);
return ret;
@@ -667,25 +668,25 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
- DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %p\n",
task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: src buffer setup failed\n",
+ "Task %p: src buffer setup failed\n",
task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
DRM_DEV_DEBUG_DRIVER(task->dev,
- "Task %pK: dst buffer setup failed\n",
+ "Task %p: dst buffer setup failed\n",
task);
return ret;
}
- DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: buffers prepared.\n",
task);
return ret;
@@ -764,7 +765,7 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
+ DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %p done: %d\n",
ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
@@ -807,7 +808,7 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
spin_unlock_irqrestore(&ipp->lock, flags);
DRM_DEV_DEBUG_DRIVER(ipp->dev,
- "ipp: %d, selected task %pK to run\n", ipp->id,
+ "ipp: %d, selected task %p to run\n", ipp->id,
task);
ret = ipp->funcs->commit(ipp, task);
@@ -917,14 +918,14 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
DRM_DEV_DEBUG_DRIVER(ipp->dev,
- "ipp: %d, nonblocking processing task %pK\n",
+ "ipp: %d, nonblocking processing task %p\n",
ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
- DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %p\n",
ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b34ec6728337..29a8366513fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
struct resource res;
int ret, i;
- mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
- if (!mic) {
+ mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+ if (IS_ERR(mic)) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(mic);
goto err;
}
@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
- mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7c3aa77186d3..67afddd566e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
@@ -58,7 +59,7 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
struct drm_plane_state *state = &exynos_state->base;
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, crtc);
+ drm_atomic_get_new_crtc_state(state->state, crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fd388b1dbe68..64c69dd2966e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -159,7 +160,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = from_timer(ctx, t, timer);
+ struct vidi_context *ctx = timer_container_of(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
@@ -312,9 +313,6 @@ static int vidi_get_modes(struct drm_connector *connector)
else
drm_edid = drm_edid_alloc(fake_edid_info, sizeof(fake_edid_info));
- if (!drm_edid)
- return 0;
-
drm_edid_connector_update(connector, drm_edid);
count = drm_edid_connector_add_modes(connector);
@@ -427,7 +425,7 @@ static void vidi_unbind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
- del_timer_sync(&ctx->timer);
+ timer_delete_sync(&ctx->timer);
}
static const struct component_ops vidi_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 176fd8871759..01813e11e6c6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -931,7 +931,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
}
static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a3670d2eaab2..69dea5049309 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -28,6 +28,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
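The fsl-dcu probe above folds platform_get_resource() plus devm_ioremap_resource() into one devm_platform_ioremap_resource() call, and the result is still tested with IS_ERR()/PTR_ERR(). The standalone demo below reimplements that error-pointer convention in userspace to show how a single returned pointer can carry either a valid mapping or a negative errno; the simplified macros here are stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *demo_ioremap(int should_fail)
{
	static char fake_regs[16];	/* stands in for a mapped register window */

	if (should_fail)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return fake_regs;
}

int main(void)
{
	void *base = demo_ioremap(1);

	if (IS_ERR(base))
		printf("mapping failed: %ld\n", PTR_ERR(base));

	base = demo_ioremap(0);
	if (!IS_ERR(base))
		printf("mapping ok at %p\n", base);
	return 0;
}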
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 794a87d16f88..a9a341ea6507 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -15,6 +15,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "fsl_dcu_drm_drv.h"
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c418e8496bdf..84eff7519e32 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -63,7 +63,7 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->hdisplay & 0xf)
return MODE_ERROR;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index aa2ea128aa2f..a2acaa699dd5 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ depends on DRM && PCI && X86 && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 8711a7a5b8da..c8f1716a12d5 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -11,6 +11,8 @@
#include <acpi/video.h>
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3e83299113e3..fd6ea8998dbe 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -9,6 +9,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_print.h>
#include "cdv_device.h"
#include "gma_device.h"
@@ -215,7 +216,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 5a0acd914f76..06fe7480e7af 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -69,7 +69,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index bbd0abdd8382..5942a9d46b02 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -11,6 +11,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "cdv_device.h"
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index cc2ed9b3fd2d..54bf626f0524 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
@@ -505,7 +506,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
static enum drm_mode_status
cdv_intel_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -855,8 +856,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ strscpy(intel_dp->adapter.name, name);
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = connector->base.kdev;
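cdv_intel_dp_i2c_init() above replaces the strncpy()-plus-manual-terminator pair with a single strscpy(), which always NUL-terminates and, when called with two arguments as in the hunk, derives the destination size from the array itself. The small userspace demo below shows why the old pattern needed the explicit terminator; the buffer size and string are arbitrary.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	const char *src = "a-very-long-adapter-name";

	/* strncpy() does not terminate the copy when the source is too long... */
	strncpy(name, src, sizeof(name) - 1);
	/* ...so the old code had to add the NUL byte itself */
	name[sizeof(name) - 1] = '\0';

	printf("truncated copy: \"%s\"\n", name);
	return 0;
}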
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 2d95e0471291..8e93ee0d0ccd 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -31,6 +31,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
@@ -222,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index f3a4517bdf27..fbe7fe317393 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
@@ -153,7 +154,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
}
static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59..c26926babc2a 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -6,7 +6,6 @@
**************************************************************************/
#include <linux/fb.h>
-#include <linux/pfn_t.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; ++i) {
- err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ err = vmf_insert_mixed(vma, address, pfn);
if (unlikely(err & VM_FAULT_ERROR))
break;
address += PAGE_SIZE;
@@ -51,48 +50,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = {
* struct fb_ops
*/
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psb_fbdev_fb_setcolreg(unsigned int regno,
- unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- uint32_t v;
-
- if (!fb)
- return -ENOMEM;
-
- if (regno > 255)
- return 1;
-
- red = CMAP_TOHW(red, info->var.red.length);
- blue = CMAP_TOHW(blue, info->var.blue.length);
- green = CMAP_TOHW(green, info->var.green.length);
- transp = CMAP_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- if (regno < 16) {
- switch (fb->format->cpp[0] * 8) {
- case 16:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- }
- }
-
- return 0;
-}
-
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
if (vma->vm_pgoff != 0)
@@ -121,23 +78,18 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
drm_gem_object_put(obj);
drm_client_release(&fb_helper->client);
-
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops psb_fbdev_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_IOMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_setcolreg = psb_fbdev_fb_setcolreg,
__FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_mmap = psb_fbdev_fb_mmap,
.fb_destroy = psb_fbdev_fb_destroy,
@@ -156,7 +108,7 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct fb_info *info;
+ struct fb_info *info = fb_helper->info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = { };
int size;
@@ -203,7 +155,10 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return PTR_ERR(backing);
obj = &backing->base;
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ fb = psb_framebuffer_create(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_drm_gem_object_put;
@@ -212,12 +167,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_framebuffer_unregister_private;
- }
-
info->fbops = &psb_fbdev_fb_ops;
/* Accessed stolen memory directly */
@@ -241,11 +190,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return 0;
-err_drm_framebuffer_unregister_private:
- drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
- drm_framebuffer_cleanup(fb);
- kfree(fb);
err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1a374702b696..e69b537ded6b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -29,24 +29,23 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- const struct drm_format_info *info;
int ret;
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
- info = drm_get_format_info(dev, mode_cmd);
- if (!info || !info->depth || info->cpp[0] > 4)
+ if (!info->depth || info->cpp[0] > 4)
return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
@@ -59,6 +58,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
+ * @info: pixel format information
* @mode_cmd: the description of the requested mode
* @obj: the backing object
*
@@ -68,6 +68,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* TODO: review object references
*/
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -78,7 +79,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -96,6 +97,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_gem_object *obj;
@@ -110,7 +112,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- fb = psb_framebuffer_create(dev, cmd, obj);
+ fb = psb_framebuffer_create(dev, info, cmd, obj);
if (IS_ERR(fb))
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 4b7627a72637..2e44a2ac2742 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
#include <asm/set_memory.h>
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d5924ca3ed05..b60720560830 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -8,6 +8,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include "intel_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index ee8b047587f2..2b06ba22f9c6 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -32,6 +32,8 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7e76790c6a81..0326f3ddc621 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -12,6 +12,7 @@
*/
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include "mid_bios.h"
#include "psb_drv.h"
@@ -279,6 +280,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..086d14678a8e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "framebuffer.h"
#include "gem.h"
@@ -658,10 +659,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index ed8626c73541..20d027d552c7 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
@@ -514,7 +515,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -726,8 +727,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
if (hdmi_dev) {
pdev = hdmi_dev->dev;
- pci_set_drvdata(pdev, NULL);
oaktrail_hdmi_i2c_exit(pdev);
+ pci_set_drvdata(pdev, NULL);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 6daa6669ed23..48e8ac560a2a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -30,6 +30,9 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 72191d6f0d06..0705ba3813e6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 0c271072af63..5f0daa25b86d 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -22,6 +22,9 @@
*
*/
#include <linux/acpi.h>
+
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_irq.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 85d3557c2eb9..005ab7f5355f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 7f77cb2b2751..0b27112ec46f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -594,6 +594,7 @@ extern void psb_modeset_cleanup(struct drm_device *dev);
/* framebuffer */
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index ff46e88c4768..1ff2bd23db74 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -11,6 +11,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2499fd6a80c9..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
@@ -212,7 +211,7 @@ extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 138f153d38ba..f8f3c42e67a7 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
@@ -331,7 +332,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
}
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 8dafff963ca8..553e7c7d9bb8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -36,6 +36,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -1159,7 +1160,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 7bbb79b0497d..3a946b472064 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -9,6 +9,7 @@
**************************************************************************/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "power.h"
@@ -249,6 +250,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg)
void gma_irq_preinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -259,10 +261,15 @@ void gma_irq_preinstall(struct drm_device *dev)
PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
- if (dev->vblank[0].enabled)
- dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
- if (dev->vblank[1].enabled)
- dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled) {
+ u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG :
+ _PSB_VSYNC_PIPEA_FLAG;
+ dev_priv->vdc_irq_mask |= mask;
+ }
+ }
/* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug)
@@ -277,8 +284,8 @@ void gma_irq_preinstall(struct drm_device *dev)
void gma_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
- unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -291,11 +298,13 @@ void gma_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- for (i = 0; i < dev->num_crtcs; ++i) {
- if (dev->vblank[i].enabled)
- gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled)
+ gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
else
- gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
}
if (dev_priv->ops->hotplug_enable)
@@ -336,8 +345,8 @@ void gma_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
- unsigned int i;
if (!dev_priv->irq_enabled)
return;
@@ -349,9 +358,11 @@ void gma_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- for (i = 0; i < dev->num_crtcs; ++i) {
- if (dev->vblank[i].enabled)
- gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled)
+ gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
}
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 0f07d77c5d52..1726a3fadff8 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -16,7 +16,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -562,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
continue; /* not a DRM property */
property = gud_connector_property_lookup(connector, prop);
- if (WARN_ON(IS_ERR(property)))
+ if (drm_WARN_ON(drm, IS_ERR(property)))
continue;
state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
- if (WARN_ON(IS_ERR(state_val)))
+ if (drm_WARN_ON(drm, IS_ERR(state_val)))
continue;
*state_val = val;
@@ -594,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
unsigned int *state_val;
state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
- if (WARN_ON_ONCE(IS_ERR(state_val)))
+ if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val)))
return PTR_ERR(state_val);
val = *state_val;
@@ -607,13 +606,16 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
return gconn->num_properties;
}
+static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
+};
+
static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
struct gud_connector_descriptor_req *desc)
{
struct drm_device *drm = &gdrm->drm;
struct gud_connector *gconn;
struct drm_connector *connector;
- struct drm_encoder *encoder;
int ret, connector_type;
u32 flags;
@@ -665,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- if (WARN_ON(connector->index != index))
+ if (drm_WARN_ON(drm, connector->index != index))
return -EINVAL;
if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
@@ -681,20 +683,13 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- /* The first connector is attached to the existing simple pipe encoder */
- if (!connector->index) {
- encoder = &gdrm->pipe.encoder;
- } else {
- encoder = &gconn->encoder;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
- if (ret)
- return ret;
-
- encoder->possible_crtcs = 1;
- }
+ gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc);
+ ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
- return drm_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, &gconn->encoder);
}
int gud_get_connectors(struct gud_device *gdrm)
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..42135a48d92e 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -16,6 +16,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -27,7 +28,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -249,7 +249,7 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}
-static int gud_get_properties(struct gud_device *gdrm)
+static int gud_plane_add_properties(struct gud_device *gdrm)
{
struct gud_property_req *properties;
unsigned int i, num_properties;
@@ -289,7 +289,7 @@ static int gud_get_properties(struct gud_device *gdrm)
* but mask out any additions on future devices.
*/
val &= GUD_ROTATION_MASK;
- ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
+ ret = drm_plane_create_rotation_property(&gdrm->plane,
DRM_MODE_ROTATE_0, val);
break;
default:
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -353,10 +338,30 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
- .check = gud_pipe_check,
- .update = gud_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
+static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check
+};
+
+static const struct drm_crtc_funcs gud_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs gud_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = gud_plane_atomic_check,
+ .atomic_update = gud_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs gud_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -365,7 +370,7 @@ static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const u64 gud_pipe_modifiers[] = {
+static const u64 gud_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -376,7 +381,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +438,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -458,10 +463,6 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return PTR_ERR(gdrm);
drm = &gdrm->drm;
- drm->mode_config.funcs = &gud_mode_config_funcs;
- ret = drmm_mode_config_init(drm);
- if (ret)
- return ret;
gdrm->flags = le32_to_cpu(desc.flags);
gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;
@@ -478,11 +479,28 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (ret)
return ret;
+ usb_set_intfdata(intf, gdrm);
+
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
+
+ /* Mode config init */
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
drm->mode_config.min_width = le32_to_cpu(desc.min_width);
drm->mode_config.max_width = le32_to_cpu(desc.max_width);
drm->mode_config.min_height = le32_to_cpu(desc.min_height);
drm->mode_config.max_height = le32_to_cpu(desc.max_height);
+ drm->mode_config.funcs = &gud_mode_config_funcs;
+ /* Format init */
formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
/* Add room for emulated XRGB8888 */
formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
@@ -582,22 +600,30 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return -ENOMEM;
}
- ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
- formats, num_formats,
- gud_pipe_modifiers, NULL);
+ /* Pipeline init */
+ ret = drm_universal_plane_init(drm, &gdrm->plane, 0,
+ &gud_plane_funcs,
+ formats, num_formats,
+ gud_plane_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
- devm_kfree(dev, formats);
- devm_kfree(dev, formats_dev);
+ drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&gdrm->plane);
- ret = gud_get_properties(gdrm);
+ ret = gud_plane_add_properties(gdrm);
if (ret) {
- dev_err(dev, "Failed to get properties (error=%d)\n", ret);
+ dev_err(dev, "Failed to add properties (error=%d)\n", ret);
return ret;
}
- drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);
+ ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL,
+ &gud_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs);
ret = gud_get_connectors(gdrm);
if (ret) {
@@ -606,22 +632,16 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
drm_mode_config_reset(drm);
-
- usb_set_intfdata(intf, gdrm);
-
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ drm_kms_helper_poll_init(drm);
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
- drm_kms_helper_poll_init(drm);
+ devm_kfree(dev, formats);
+ devm_kfree(dev, formats_dev);
drm_client_setup(drm, NULL);
@@ -633,13 +653,9 @@ static void gud_disconnect(struct usb_interface *interface)
struct gud_device *gdrm = usb_get_intfdata(interface);
struct drm_device *drm = &gdrm->drm;
- drm_dbg(drm, "%s:\n", __func__);
-
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d27c31648341 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -11,12 +11,11 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modes.h>
-#include <drm/drm_simple_kms_helper.h>
struct gud_device {
struct drm_device drm;
- struct drm_simple_display_pipe pipe;
- struct device *dmadev;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
@@ -63,11 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state);
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state);
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
struct gud_property_req *properties);
int gud_get_connectors(struct gud_device *gdrm);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index e163649816d5..76d77a736d84 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -20,7 +20,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -62,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
size_t len;
void *buf;
- WARN_ON_ONCE(format->char_per_block[0] != 1);
+ drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1);
/* Start on a byte boundary */
rect->x1 = ALIGN_DOWN(rect->x1, block_width);
@@ -70,7 +69,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
height = drm_rect_height(rect);
len = drm_format_info_min_pitch(format, 0, width) * height;
- buf = kmalloc(width * height, GFP_KERNEL);
+ buf = kmalloc_array(height, width, GFP_KERNEL);
if (!buf)
return 0;
@@ -139,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(fb->dev, 1);
return len;
}
@@ -188,8 +187,13 @@ retry:
} else if (format->format == DRM_FORMAT_RGB332) {
drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
- gud_is_big_endian());
+ if (gud_is_big_endian()) {
+ drm_fb_xrgb8888_to_rgb565be(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ }
} else if (format->format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
@@ -234,7 +238,7 @@ struct gud_usb_bulk_context {
static void gud_usb_bulk_timeout(struct timer_list *t)
{
- struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);
+ struct gud_usb_bulk_context *ctx = timer_container_of(ctx, t, timer);
usb_sg_cancel(&ctx->sgr);
}
@@ -254,14 +258,14 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
usb_sg_wait(&ctx.sgr);
- if (!del_timer_sync(&ctx.timer))
+ if (!timer_delete_sync(&ctx.timer))
ret = -ETIMEDOUT;
else if (ctx.sgr.status < 0)
ret = ctx.sgr.status;
else if (ctx.sgr.bytes != len)
ret = -EIO;
- destroy_timer_on_stack(&ctx.timer);
+ timer_destroy_on_stack(&ctx.timer);
return ret;
}
@@ -446,14 +450,15 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer
gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state)
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
- struct drm_plane_state *old_plane_state = pipe->plane.state;
- const struct drm_display_mode *mode = &new_crtc_state->mode;
- struct drm_atomic_state *state = new_plane_state->state;
+ struct gud_device *gdrm = to_gud_device(plane->dev);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -464,20 +469,37 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
int idx, ret;
size_t len;
- if (WARN_ON_ONCE(!fb))
+ if (drm_WARN_ON_ONCE(plane->dev, !fb))
+ return -EINVAL;
+
+ if (drm_WARN_ON_ONCE(plane->dev, !crtc))
return -EINVAL;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ mode = &crtc_state->mode;
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
if (old_plane_state->rotation != new_plane_state->rotation)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
if (old_fb && old_fb->format != format)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
- if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
return 0;
/* Only one connector is supported */
- if (hweight32(new_crtc_state->connector_mask) != 1)
+ if (hweight32(crtc_state->connector_mask) != 1)
return -EINVAL;
if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
@@ -495,7 +517,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
if (!connector_state) {
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
+ drm_connector_list_iter_begin(plane->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc) {
connector_state = connector->state;
@@ -505,7 +527,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
drm_connector_list_iter_end(&conn_iter);
}
- if (WARN_ON_ONCE(!connector_state))
+ if (drm_WARN_ON_ONCE(plane->dev, !connector_state))
return -ENOENT;
len = struct_size(req, properties,
@@ -517,7 +539,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
gud_from_display_mode(&req->mode, mode);
req->format = gud_from_fourcc(format->format);
- if (WARN_ON_ONCE(!req->format)) {
+ if (drm_WARN_ON_ONCE(plane->dev, !req->format)) {
ret = -EINVAL;
goto out;
}
@@ -539,7 +561,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
val = new_plane_state->rotation;
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(plane->dev, 1);
ret = -EINVAL;
goto out;
}
@@ -562,16 +584,18 @@ out:
return ret;
}
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state)
{
- struct drm_device *drm = pipe->crtc.dev;
+ struct drm_device *drm = plane->dev;
struct gud_device *gdrm = to_gud_device(drm);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
@@ -606,7 +630,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (ret)
goto ctrl_disable;
- if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage)
gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 93b8d32e3be1..d1f3f5793f34 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,8 +2,9 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI
- depends on MMU
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 95a4ed599d98..1f65c683282f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
- dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+ hibmc_drm_debugfs.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
index 0a903cce1fa9..8732cd1d8cb6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include "dp_comm.h"
#include "dp_reg.h"
+#include "dp_hw.h"
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
@@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
 /* ret >= 0: ret is size; ret < 0: ret is err code */
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
+ struct hibmc_dp_dev *dp = dp_priv->dp_dev;
u32 aux_cmd;
int ret;
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
@@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
return hibmc_dp_aux_parse_xfer(dp, msg);
}
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+void hibmc_dp_aux_init(struct hibmc_dp *dp)
{
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
HIBMC_DP_MIN_PULSE_NUM);
dp->aux.transfer = hibmc_dp_aux_xfer;
- dp->aux.is_remote = 0;
+ dp->aux.name = "HIBMC DRM dp aux";
+ dp->aux.drm_dev = dp->drm_dev;
drm_dp_aux_init(&dp->aux);
+ dp->dp_dev->aux = &dp->aux;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
index 2c52a4476c4d..4add05c7f161 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <drm/display/drm_dp_helper.h>
+#include "dp_hw.h"
+
#define HIBMC_DP_LANE_NUM_MAX 2
struct hibmc_link_status {
@@ -32,12 +34,13 @@ struct hibmc_dp_link {
};
struct hibmc_dp_dev {
- struct drm_dp_aux aux;
+ struct drm_dp_aux *aux;
struct drm_device *dev;
void __iomem *base;
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
struct hibmc_dp_link link;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ void __iomem *serdes_base;
};
#define dp_field_modify(reg_value, mask, val) \
@@ -57,7 +60,10 @@ struct hibmc_dp_dev {
mutex_unlock(&_dp->lock); \
} while (0)
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+void hibmc_dp_aux_init(struct hibmc_dp *dp);
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
index 74dd9956144e..08f9e1caf7fc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -15,5 +15,7 @@
#define HIBMC_DP_CLK_EN 0x7
#define HIBMC_DP_SYNC_EN_MASK 0x3
#define HIBMC_DP_LINK_RATE_CAL 27
+#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
+#define HIBMC_DP_INT_ENABLE 0xc
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
index a8d543881c09..8f0daec7d174 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
+ HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
}
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
@@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
{
struct drm_device *drm_dev = dp->drm_dev;
struct hibmc_dp_dev *dp_dev;
+ int ret;
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
if (!dp_dev)
@@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
dp_dev->dev = drm_dev;
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
- hibmc_dp_aux_init(dp_dev);
+ hibmc_dp_aux_init(dp);
+
+ ret = hibmc_dp_serdes_init(dp_dev);
+ if (ret)
+ return ret;
dp_dev->link.cap.lanes = 0x2;
- dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
/* hdcp data */
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
@@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
return 0;
}
+void hibmc_dp_enable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+}
+
+void hibmc_dp_disable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+}
+
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+}
+
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
{
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
@@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
return 0;
}
+
+void hibmc_dp_reset_link(struct hibmc_dp *dp)
+{
+ dp->dp_dev->link.status.clock_recovered = false;
+ dp->dp_dev->link.status.channel_equalized = false;
+}
+
+static const struct hibmc_dp_color_raw g_rgb_raw[] = {
+ {CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
+ {CBAR_WHITE, 0xfff, 0xfff, 0xfff},
+ {CBAR_RED, 0xfff, 0x000, 0x000},
+ {CBAR_ORANGE, 0xfff, 0x800, 0x000},
+ {CBAR_YELLOW, 0xfff, 0xfff, 0x000},
+ {CBAR_GREEN, 0x000, 0xfff, 0x000},
+ {CBAR_CYAN, 0x000, 0x800, 0x800},
+ {CBAR_BLUE, 0x000, 0x000, 0xfff},
+ {CBAR_PURPLE, 0x800, 0x000, 0x800},
+ {CBAR_BLACK, 0x000, 0x000, 0x000},
+};
+
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ struct hibmc_dp_color_raw raw_data;
+
+ if (cfg->enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
+ cfg->self_timing);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
+ cfg->dynamic_rate);
+ if (cfg->pattern == CBAR_COLOR_BAR) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
+ } else {
+ raw_data = g_rgb_raw[cfg->pattern];
+ drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
+ raw_data.g_value, raw_data.b_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
+ raw_data.r_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
+ raw_data.g_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
+ raw_data.b_value);
+ }
+ }
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
index 4dc13b3d9875..665f5b166dfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -10,19 +10,55 @@
#include <drm/drm_encoder.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_dp_helper.h>
struct hibmc_dp_dev;
+enum hibmc_dp_cbar_pattern {
+ CBAR_COLOR_BAR,
+ CBAR_WHITE,
+ CBAR_RED,
+ CBAR_ORANGE,
+ CBAR_YELLOW,
+ CBAR_GREEN,
+ CBAR_CYAN,
+ CBAR_BLUE,
+ CBAR_PURPLE,
+ CBAR_BLACK,
+};
+
+struct hibmc_dp_color_raw {
+ enum hibmc_dp_cbar_pattern pattern;
+ u32 r_value;
+ u32 g_value;
+ u32 b_value;
+};
+
+struct hibmc_dp_cbar_cfg {
+ u8 enable;
+ u8 self_timing;
+ u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */
+ enum hibmc_dp_cbar_pattern pattern;
+};
+
struct hibmc_dp {
struct hibmc_dp_dev *dp_dev;
struct drm_device *drm_dev;
struct drm_encoder encoder;
struct drm_connector connector;
void __iomem *mmio;
+ struct drm_dp_aux aux;
+ struct hibmc_dp_cbar_cfg cfg;
+ u32 irq_status;
};
int hibmc_dp_hw_init(struct hibmc_dp *dp);
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
+void hibmc_dp_reset_link(struct hibmc_dp *dp);
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
+void hibmc_dp_enable_int(struct hibmc_dp *dp);
+void hibmc_dp_disable_int(struct hibmc_dp *dp);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..0726cb5b736e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -9,6 +9,22 @@
#define HIBMC_EQ_MAX_RETRY 5
+static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_1_62:
+ return DP_SERDES_BW_1_62;
+ case DP_LINK_BW_2_7:
+ return DP_SERDES_BW_2_7;
+ case DP_LINK_BW_5_4:
+ return DP_SERDES_BW_5_4;
+ case DP_LINK_BW_8_1:
+ return DP_SERDES_BW_8_1;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
{
u8 buf[2];
@@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set rate and lane count */
buf[0] = dp->link.cap.link_rate;
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
- ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
@@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set 8b/10b and downspread */
buf[0] = DP_SPREAD_AMP_0_5;
buf[1] = DP_SET_ANSI_8B10B;
- ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
}
- ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
- if (ret)
- drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
-
- return ret;
+ return 0;
}
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
@@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
return ret;
for (i = 0; i < dp->link.cap.lanes; i++)
- train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
return false;
}
-static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
{
+ int ret;
+
switch (dp->link.cap.link_rate) {
case DP_LINK_BW_2_7:
dp->link.cap.link_rate = DP_LINK_BW_1_62;
- return 0;
+ break;
case DP_LINK_BW_5_4:
dp->link.cap.link_rate = DP_LINK_BW_2_7;
- return 0;
+ break;
case DP_LINK_BW_8_1:
dp->link.cap.link_rate = DP_LINK_BW_5_4;
- return 0;
+ break;
default:
return -EINVAL;
}
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ return hibmc_dp_serdes_rate_switch(ret, dp);
}
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
@@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
switch (dp->link.cap.lanes) {
case 0x2:
dp->link.cap.lanes--;
+ drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
break;
case 0x1:
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
@@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
- drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
}
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
return ret;
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
- drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
@@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
}
hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
dp->link.train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -290,11 +325,36 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
return hibmc_dp_link_reduce_rate(dp);
}
+static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
+{
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
+ dp->link.cap.link_rate = DP_LINK_BW_8_1;
+
+ dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
+ dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
+}
+
int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
{
struct hibmc_dp_link *link = &dp->link;
int ret;
+ ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ hibmc_dp_update_caps(dp);
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ ret = hibmc_dp_serdes_rate_switch(ret, dp);
+ if (ret)
+ return ret;
+
while (true) {
ret = hibmc_dp_link_training_cr_pre(dp);
if (ret)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
index 4a515c726d52..394b1e933c3a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -5,72 +5,128 @@
#define DP_REG_H
#define HIBMC_DP_AUX_CMD_ADDR 0x50
+
#define HIBMC_DP_AUX_WR_DATA0 0x54
#define HIBMC_DP_AUX_WR_DATA1 0x58
#define HIBMC_DP_AUX_WR_DATA2 0x5c
#define HIBMC_DP_AUX_WR_DATA3 0x60
#define HIBMC_DP_AUX_RD_DATA0 0x64
+
#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+
#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+
#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+
#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+
#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
+
#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_MSA1 0x11c
#define HIBMC_DP_VIDEO_MSA2 0x120
+
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#define HIBMC_DP_COLOR_BAR_CTRL 0x260
+#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
+
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_HDCP_CFG 0x600
+
#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+
#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+
#define HIBMC_DP_INTR_ENABLE 0x720
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
-#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
-#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
-#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
-#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
-#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
-#define HIBMC_DP_CFG_AUX_REQ BIT(0)
-#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
-#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
-#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
-#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
-#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
-#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
-#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
-#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
-#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
-#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
-#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
-#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
-#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+#define HIBMC_DP_INTSTAT 0x1e0724
+#define HIBMC_DP_INTCLR 0x1e0728
+
+/* dp serdes reg */
+#define HIBMC_DP_HOST_OFFSET 0x10000
+#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
+#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
+#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
+#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
+#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
+#define DP_SERDES_DONE 0x3
+
+/* dp serdes TX de-emphasis configuration */
+#define DP_SERDES_VOL0_PRE0 0x280
+#define DP_SERDES_VOL0_PRE1 0x2300
+#define DP_SERDES_VOL0_PRE2 0x53c0
+#define DP_SERDES_VOL0_PRE3 0x8400
+#define DP_SERDES_VOL1_PRE0 0x380
+#define DP_SERDES_VOL1_PRE1 0x3440
+#define DP_SERDES_VOL1_PRE2 0x6480
+#define DP_SERDES_VOL2_PRE0 0x4c1
+#define DP_SERDES_VOL2_PRE1 0x4500
+#define DP_SERDES_VOL3_PRE0 0x600
+#define DP_SERDES_BW_8_1 0x3
+#define DP_SERDES_BW_5_4 0x2
+#define DP_SERDES_BW_2_7 0x1
+#define DP_SERDES_BW_1_62 0x0
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
new file mode 100644
index 000000000000..676059d4c1e6
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_config.h"
+#include "dp_reg.h"
+
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
+{
+ static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
+ DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
+ {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
+ DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
+ DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
+ int cfg[2];
+ int i;
+
+ for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
+ cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
+ [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
+ if (!cfg[i])
+ return -EINVAL;
+
+ /* lane1 offset is 4 */
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
+ }
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
+{
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
+ return -EAGAIN;
+ }
+
+ if (rate < DP_SERDES_BW_8_1)
+ drm_dbg_dp(dp->dev, "reducing serdes rate to :%d\n",
+ rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
+
+ return 0;
+}
+
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
+{
+ dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
+
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
+
+ return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
new file mode 100644
index 000000000000..f585387c3a49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+
+#define MAX_BUF_SIZE 12
+
+static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hibmc_drm_private *priv = file_inode(file)->i_private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int ret, idx;
+ u8 buf[MAX_BUF_SIZE];
+
+ if (count >= MAX_BUF_SIZE)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+	/* Only 4 parameters are allowed; the ranges are as follows:
+	 * [0] enable/disable the colorbar feature
+	 *     0: disable colorbar, 1: enable colorbar
+	 * [1] the timing source of the colorbar display
+	 *     0: timing follows XDP, 1: internal self timing
+	 * [2] the movement of the colorbar display
+	 *     0: static colorbar image,
+	 *     1~255: shift the color right once every (1~255) frames
+	 * [3] the color type of the colorbar display
+	 *     0~9: color bar, white, red, orange,
+	 *     yellow, green, cyan, blue, purple, black
+	 */
+ if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
+ &cfg->dynamic_rate, &cfg->pattern) != 4) {
+ return -EINVAL;
+ }
+
+ if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
+ return -EINVAL;
+
+ ret = drm_dev_enter(&priv->dev, &idx);
+ if (!ret)
+ return -ENODEV;
+
+ hibmc_dp_set_cbar(&priv->dp, cfg);
+
+ drm_dev_exit(idx);
+
+ return count;
+}
+
+static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
+{
+ struct hibmc_drm_private *priv = m->private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int idx;
+
+ if (!drm_dev_enter(&priv->dev, &idx))
+ return -ENODEV;
+
+ seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
+ cfg->dynamic_rate, cfg->pattern);
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hibmc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
+}
+
+static const struct file_operations hibmc_dbg_fops = {
+ .owner = THIS_MODULE,
+ .write = hibmc_control_write,
+ .read = seq_read,
+ .open = hibmc_open,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+ struct drm_device *dev = connector->dev;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+
+	/* create the file in the drm directory so we don't need to remove it manually */
+ debugfs_create_file("colorbar-cfg", 0200,
+ root, priv, &hibmc_dbg_fops);
+}
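
For illustration only (not part of the patch): a minimal user-space sketch that exercises the new colorbar-cfg debugfs file, following the four-value format parsed by hibmc_control_write() above. The debugfs path and connector name are assumptions; they depend on the DRM minor and connector naming on the running system.

/* Hypothetical user-space sketch: enable a static red color bar through the
 * colorbar-cfg debugfs file added above. The path is an assumption; adjust
 * the DRM minor and connector name for the target system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/DP-1/colorbar-cfg";
	/* "<enable> <self_timing> <dynamic_rate> <pattern>": 1 1 0 2 = static red */
	const char *cfg = "1 1 0 2";
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open colorbar-cfg");
		return 1;
	}
	if (write(fd, cfg, strlen(cfg)) != (ssize_t)strlen(cfg))
		perror("write colorbar-cfg");
	close(fd);
	return 0;
}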
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
index 603d6b198a54..d06832e62e96 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -13,27 +13,64 @@
#include "hibmc_drm_drv.h"
#include "dp/dp_hw.h"
+#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
+
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *drm_edid;
int count;
- count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
- connector->dev->mode_config.max_height);
- drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
return count;
}
+static int hibmc_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ mdelay(200);
+
+ return drm_connector_helper_detect_from_ddc(connector, ctx, force);
+}
+
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
.get_modes = hibmc_dp_connector_get_modes,
+ .detect_ctx = hibmc_dp_detect,
};
+static int hibmc_dp_late_register(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ hibmc_dp_enable_int(dp);
+
+ return drm_dp_aux_register(&dp->aux);
+}
+
+static void hibmc_dp_early_unregister(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ drm_dp_aux_unregister(&dp->aux);
+
+ hibmc_dp_disable_int(dp);
+}
+
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = hibmc_dp_late_register,
+ .early_unregister = hibmc_dp_early_unregister,
+ .debugfs_init = hibmc_debugfs_init,
};
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
@@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
.atomic_disable = hibmc_dp_encoder_disable,
};
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
+ drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
+ hibmc_dp_hpd_cfg(&priv->dp);
+ } else {
+ drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
+ hibmc_dp_reset_link(&priv->dp);
+ }
+
+ if (dev->registered)
+ drm_connector_helper_hpd_irq_event(&priv->dp.connector);
+
+ drm_dev_exit(idx);
+
+ return IRQ_HANDLED;
+}
+
int hibmc_dp_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
@@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
if (ret) {
drm_err(dev, "init dp connector failed: %d\n", ret);
return ret;
@@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index e6de6d5edf6b..289304500ab0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -28,12 +28,12 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
-#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
-#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+#include "dp/dp_reg.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
+
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_DP_INTSTAT);
+ if (status) {
+ priv->dp.irq_status = status;
+ writel(status, priv->mmio + HIBMC_DP_INTCLR);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -99,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
+ struct drm_encoder *encoder;
+ u32 clone_mask = 0;
int ret;
ret = drmm_mode_config_init(dev);
@@ -121,9 +139,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
- /* if DP existed, init DP */
- if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
- HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+	/*
+	 * If the serdes register is readable and reads back non-zero,
+	 * the DP block exists, so initialize it.
+	 */
+ ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
+ if (ret) {
ret = hibmc_dp_init(priv);
if (ret)
drm_err(dev, "failed to init dp: %d\n", ret);
@@ -135,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+ drm_for_each_encoder(encoder, dev)
+ clone_mask |= drm_encoder_mask(encoder);
+
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_clones = clone_mask;
+
return 0;
}
@@ -250,15 +277,44 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
return 0;
}
-static int hibmc_unload(struct drm_device *dev)
+static void hibmc_unload(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
drm_atomic_helper_shutdown(dev);
+}
+
+static int hibmc_msi_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int valid_irq_num;
+ int irq;
+ int ret;
- free_irq(pdev->irq, dev);
+ ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
+ HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
+ if (ret < 0) {
+ drm_err(dev, "enabling MSI failed: %d\n", ret);
+ return ret;
+ }
- pci_disable_msi(to_pci_dev(dev->dev));
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+ /* PCI devices require shared interrupts. */
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+ IRQF_SHARED, g_irqs_names_map[i], dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+ IRQF_SHARED, g_irqs_names_map[i], dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -271,13 +327,13 @@ static int hibmc_load(struct drm_device *dev)
ret = hibmc_hw_init(priv);
if (ret)
- goto err;
+ return ret;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- goto err;
+ return ret;
}
ret = hibmc_kms_init(priv);
@@ -290,15 +346,10 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(pdev);
+ ret = hibmc_msi_init(dev);
if (ret) {
- drm_warn(dev, "enabling MSI failed: %d\n", ret);
- } else {
- /* PCI devices require shared interrupts. */
- ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret)
- drm_warn(dev, "install irq failed: %d\n", ret);
+ drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
+ goto err;
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -374,7 +425,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
- drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ hibmc_pci_remove(pdev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index d982f1e4b958..ca8502e2760c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -22,6 +22,9 @@
#include "dp/dp_hw.h"
+#define HIBMC_MIN_VECTORS 1
+#define HIBMC_MAX_VECTORS 2
+
struct hibmc_vdac {
struct drm_device *dev;
struct drm_encoder encoder;
@@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
return container_of(connector, struct hibmc_vdac, connector);
}
+static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_dp, connector);
+}
+
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return container_of(dev, struct hibmc_drm_private, dev);
@@ -61,7 +69,12 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+void hibmc_ddc_del(struct hibmc_vdac *vdac);
int hibmc_dp_init(struct hibmc_drm_private *priv);
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
+
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 99b3b77b5445..44860011855e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
return i2c_bit_add_bus(&vdac->adapter);
}
+
+void hibmc_ddc_del(struct hibmc_vdac *vdac)
+{
+ i2c_del_adapter(&vdac->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 05e19ea4c9f9..841e81f47b68 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -53,13 +53,14 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&vdac->adapter);
+ hibmc_ddc_del(vdac);
drm_connector_cleanup(connector);
}
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -109,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
- return ret;
+ goto err;
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
@@ -120,12 +121,19 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
&vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
- return ret;
+ goto err;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
return 0;
+
+err:
+ hibmc_ddc_del(vdac);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..76384b4581bf 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -844,7 +845,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +856,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 1e1c87be1204..8a11c2df5b88 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index d2d8582b36df..9e776112c03e 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -11,7 +11,9 @@
struct hyperv_drm_device {
/* drm */
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
/* mode */
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f59abfa7622a..06b5d96e6eaf 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "hyperv_drm.h"
@@ -154,6 +155,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
return 0;
err_free_mmio:
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
@@ -172,6 +174,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6c6b57298797..7978f8c8108c 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -5,6 +5,8 @@
#include <linux/hyperv.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -15,7 +17,11 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "hyperv_drm.h"
@@ -38,18 +44,6 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
return 0;
}
-static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
-}
-
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
@@ -98,30 +92,71 @@ static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
return 0;
}
-static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct hyperv_drm_device *hv = to_hv(crtc->dev);
+ struct drm_plane *plane = &hv->plane;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc_state *crtc_state = crtc->state;
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
- hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
+
+ drm_crtc_vblank_on(crtc);
}
-static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
+ .atomic_enable = hyperv_crtc_helper_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
+};
+
+static const struct drm_crtc_funcs hyperv_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
+};
+
+static int hyperv_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
- if (fb->format->format != DRM_FORMAT_XRGB8888)
- return -EINVAL;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible)
+ return 0;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
@@ -132,53 +167,120 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
return 0;
}
-static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void hyperv_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_rect rect;
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_rect damage;
+ struct drm_rect dst_clip;
+ struct drm_atomic_helper_damage_iter iter;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = new_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ hyperv_blit_to_vram_rect(new_state->fb, &shadow_plane_state->data[0], &damage);
+ hyperv_update_dirt(hv->hdev, &damage);
+ }
+}
- if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
- hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
- hyperv_update_dirt(hv->hdev, &rect);
+static int hyperv_plane_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
}
+ return -ENODEV;
}
-static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
- .enable = hyperv_pipe_enable,
- .check = hyperv_pipe_check,
- .update = hyperv_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static void hyperv_plane_panic_flush(struct drm_plane *plane)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_rect rect;
+
+ if (!plane->state || !plane->state->fb)
+ return;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ hyperv_update_dirt(hv->hdev, &rect);
+}
+
+static const struct drm_plane_helper_funcs hyperv_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = hyperv_plane_atomic_check,
+ .atomic_update = hyperv_plane_atomic_update,
+ .get_scanout_buffer = hyperv_plane_get_scanout_buffer,
+ .panic_flush = hyperv_plane_panic_flush,
};
-static const uint32_t hyperv_formats[] = {
- DRM_FORMAT_XRGB8888,
+static const struct drm_plane_funcs hyperv_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static const uint64_t hyperv_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs hyperv_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
+ struct drm_device *dev = &hv->dev;
+ struct drm_encoder *encoder = &hv->encoder;
+ struct drm_plane *plane = &hv->plane;
+ struct drm_crtc *crtc = &hv->crtc;
+ struct drm_connector *connector = &hv->connector;
int ret;
- ret = drm_simple_display_pipe_init(&hv->dev,
- &hv->pipe,
- &hyperv_pipe_funcs,
- hyperv_formats,
- ARRAY_SIZE(hyperv_formats),
- hyperv_modifiers,
- &hv->connector);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &hyperv_plane_funcs,
+ hyperv_formats, ARRAY_SIZE(hyperv_formats),
+ hyperv_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(plane, &hyperv_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(plane);
- drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &hyperv_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &hyperv_crtc_helper_funcs);
- return 0;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder,
+ &hyperv_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialized connector.\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
}
static enum drm_mode_status
@@ -221,18 +323,16 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
dev->mode_config.funcs = &hyperv_mode_config_funcs;
- ret = hyperv_conn_init(hv);
- if (ret) {
- drm_err(dev, "Failed to initialized connector.\n");
- return ret;
- }
-
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
return ret;
}
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
return 0;
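
The core of the hyperv conversion above is moving from the simple-pipe update hook to a plane atomic_update that walks per-plane damage. Stripped of the Hyper-V specifics, a minimal sketch of that pattern looks like the following; demo_blit_rect() is a placeholder standing in for the driver's copy into VRAM plus its dirty-rectangle notification.

#include <linux/iosys-map.h>

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

/* Placeholder for the driver-specific copy of one clip into VRAM. */
static void demo_blit_rect(struct drm_framebuffer *fb,
			   const struct iosys_map *vmap,
			   const struct drm_rect *clip)
{
}

static void demo_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(new_state);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;

	/* Visit only the regions that changed between the two states; a
	 * full modeset shows up here as one full-plane rectangle. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage)
		demo_blit_rect(new_state->fb, &shadow->data[0], &damage);
}

For userspace damage clips to reach this iterator at all, the plane has to opt in at init time with drm_plane_enable_fb_damage_clips(), which the patch does in hyperv_pipe_init().
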
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
deleted file mode 100644
index 6f19e1c35e30..000000000000
--- a/drivers/gpu/drm/i2c/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-config DRM_I2C_NXP_TDA998X
- tristate "NXP Semiconductors TDA998X HDMI encoder"
- default m if DRM_TILCDC
- select CEC_CORE if CEC_NOTIFIER
- select SND_SOC_HDMI_CODEC if SND_SOC
- help
- Support for NXP Semiconductors TDA998X HDMI encoders.
-
-config DRM_I2C_NXP_TDA9950
- tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
- select CEC_NOTIFIER
- select CEC_CORE
-
-endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
deleted file mode 100644
index a962f6f08568..000000000000
--- a/drivers/gpu/drm/i2c/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ch7006-y := ch7006_drv.o ch7006_mode.o
-obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
-
-sil164-y := sil164_drv.o
-obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
-
-tda998x-y := tda998x_drv.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
deleted file mode 100644
index cbff851e0c85..000000000000
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ /dev/null
@@ -1,507 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TDA9950 Consumer Electronics Control driver
- *
- * The NXP TDA9950 implements the HDMI Consumer Electronics Control
- * interface. The host interface is similar to a mailbox: the data
- * registers starting at REG_CDR0 are written to send a command to the
- * internal CPU, and replies are read from these registers.
- *
- * As the data registers represent a mailbox, they must be accessed
- * as a single I2C transaction. See the TDA9950 data sheet for details.
- */
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_data/tda9950.h>
-#include <linux/slab.h>
-#include <drm/drm_edid.h>
-#include <media/cec.h>
-#include <media/cec-notifier.h>
-
-enum {
- REG_CSR = 0x00,
- CSR_BUSY = BIT(7),
- CSR_INT = BIT(6),
- CSR_ERR = BIT(5),
-
- REG_CER = 0x01,
-
- REG_CVR = 0x02,
-
- REG_CCR = 0x03,
- CCR_RESET = BIT(7),
- CCR_ON = BIT(6),
-
- REG_ACKH = 0x04,
- REG_ACKL = 0x05,
-
- REG_CCONR = 0x06,
- CCONR_ENABLE_ERROR = BIT(4),
- CCONR_RETRY_MASK = 7,
-
- REG_CDR0 = 0x07,
-
- CDR1_REQ = 0x00,
- CDR1_CNF = 0x01,
- CDR1_IND = 0x81,
- CDR1_ERR = 0x82,
- CDR1_IER = 0x83,
-
- CDR2_CNF_SUCCESS = 0x00,
- CDR2_CNF_OFF_STATE = 0x80,
- CDR2_CNF_BAD_REQ = 0x81,
- CDR2_CNF_CEC_ACCESS = 0x82,
- CDR2_CNF_ARB_ERROR = 0x83,
- CDR2_CNF_BAD_TIMING = 0x84,
- CDR2_CNF_NACK_ADDR = 0x85,
- CDR2_CNF_NACK_DATA = 0x86,
-};
-
-struct tda9950_priv {
- struct i2c_client *client;
- struct device *hdmi;
- struct cec_adapter *adap;
- struct tda9950_glue *glue;
- u16 addresses;
- struct cec_msg rx_msg;
- struct cec_notifier *notify;
- bool open;
-};
-
-static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
-{
- struct i2c_msg msg;
- u8 buf[CEC_MAX_MSG_SIZE + 3];
- int ret;
-
- if (WARN_ON(cnt > sizeof(buf) - 1))
- return -EINVAL;
-
- buf[0] = addr;
- memcpy(buf + 1, p, cnt);
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = cnt + 1;
- msg.buf = buf;
-
- dev_dbg(&client->dev, "wr 0x%02x: %*ph\n", addr, cnt, p);
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
- return ret < 0 ? ret : 0;
-}
-
-static void tda9950_write(struct i2c_client *client, u8 addr, u8 val)
-{
- tda9950_write_range(client, addr, &val, 1);
-}
-
-static int tda9950_read_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
-{
- struct i2c_msg msg[2];
- int ret;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &addr;
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = cnt;
- msg[1].buf = p;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0)
- dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
-
- dev_dbg(&client->dev, "rd 0x%02x: %*ph\n", addr, cnt, p);
-
- return ret;
-}
-
-static u8 tda9950_read(struct i2c_client *client, u8 addr)
-{
- int ret;
- u8 val;
-
- ret = tda9950_read_range(client, addr, &val, 1);
- if (ret < 0)
- val = 0;
-
- return val;
-}
-
-static irqreturn_t tda9950_irq(int irq, void *data)
-{
- struct tda9950_priv *priv = data;
- unsigned int tx_status;
- u8 csr, cconr, buf[19];
- u8 arb_lost_cnt, nack_cnt, err_cnt;
-
- if (!priv->open)
- return IRQ_NONE;
-
- csr = tda9950_read(priv->client, REG_CSR);
- if (!(csr & CSR_INT))
- return IRQ_NONE;
-
- cconr = tda9950_read(priv->client, REG_CCONR) & CCONR_RETRY_MASK;
-
- tda9950_read_range(priv->client, REG_CDR0, buf, sizeof(buf));
-
- /*
- * This should never happen: the data sheet says that there will
- * always be a valid message if the interrupt line is asserted.
- */
- if (buf[0] == 0) {
- dev_warn(&priv->client->dev, "interrupt pending, but no message?\n");
- return IRQ_NONE;
- }
-
- switch (buf[1]) {
- case CDR1_CNF: /* transmit result */
- arb_lost_cnt = nack_cnt = err_cnt = 0;
- switch (buf[2]) {
- case CDR2_CNF_SUCCESS:
- tx_status = CEC_TX_STATUS_OK;
- break;
-
- case CDR2_CNF_ARB_ERROR:
- tx_status = CEC_TX_STATUS_ARB_LOST;
- arb_lost_cnt = cconr;
- break;
-
- case CDR2_CNF_NACK_ADDR:
- tx_status = CEC_TX_STATUS_NACK;
- nack_cnt = cconr;
- break;
-
- default: /* some other error, refer to TDA9950 docs */
- dev_err(&priv->client->dev, "CNF reply error 0x%02x\n",
- buf[2]);
- tx_status = CEC_TX_STATUS_ERROR;
- err_cnt = cconr;
- break;
- }
- /* TDA9950 executes all retries for us */
- if (tx_status != CEC_TX_STATUS_OK)
- tx_status |= CEC_TX_STATUS_MAX_RETRIES;
- cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
- nack_cnt, 0, err_cnt);
- break;
-
- case CDR1_IND:
- priv->rx_msg.len = buf[0] - 2;
- if (priv->rx_msg.len > CEC_MAX_MSG_SIZE)
- priv->rx_msg.len = CEC_MAX_MSG_SIZE;
-
- memcpy(priv->rx_msg.msg, buf + 2, priv->rx_msg.len);
- cec_received_msg(priv->adap, &priv->rx_msg);
- break;
-
- default: /* unknown */
- dev_err(&priv->client->dev, "unknown service id 0x%02x\n",
- buf[1]);
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-static int tda9950_cec_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
-{
- struct tda9950_priv *priv = adap->priv;
- u8 buf[CEC_MAX_MSG_SIZE + 2];
-
- buf[0] = 2 + msg->len;
- buf[1] = CDR1_REQ;
- memcpy(buf + 2, msg->msg, msg->len);
-
- if (attempts > 5)
- attempts = 5;
-
- tda9950_write(priv->client, REG_CCONR, attempts);
-
- return tda9950_write_range(priv->client, REG_CDR0, buf, 2 + msg->len);
-}
-
-static int tda9950_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
-{
- struct tda9950_priv *priv = adap->priv;
- u16 addresses;
- u8 buf[2];
-
- if (addr == CEC_LOG_ADDR_INVALID)
- addresses = priv->addresses = 0;
- else
- addresses = priv->addresses |= BIT(addr);
-
- /* TDA9950 doesn't want address 15 set */
- addresses &= 0x7fff;
- buf[0] = addresses >> 8;
- buf[1] = addresses;
-
- return tda9950_write_range(priv->client, REG_ACKH, buf, 2);
-}
-
-/*
- * When operating as part of the TDA998x, we need additional handling
- * to initialise and shut down the TDA9950 part of the device. These
- * two hooks are provided to allow the TDA998x code to perform those
- * activities.
- */
-static int tda9950_glue_open(struct tda9950_priv *priv)
-{
- int ret = 0;
-
- if (priv->glue && priv->glue->open)
- ret = priv->glue->open(priv->glue->data);
-
- priv->open = true;
-
- return ret;
-}
-
-static void tda9950_glue_release(struct tda9950_priv *priv)
-{
- priv->open = false;
-
- if (priv->glue && priv->glue->release)
- priv->glue->release(priv->glue->data);
-}
-
-static int tda9950_open(struct tda9950_priv *priv)
-{
- struct i2c_client *client = priv->client;
- int ret;
-
- ret = tda9950_glue_open(priv);
- if (ret)
- return ret;
-
- /* Reset the TDA9950, and wait 250ms for it to recover */
- tda9950_write(client, REG_CCR, CCR_RESET);
- msleep(250);
-
- tda9950_cec_adap_log_addr(priv->adap, CEC_LOG_ADDR_INVALID);
-
- /* Start the command processor */
- tda9950_write(client, REG_CCR, CCR_ON);
-
- return 0;
-}
-
-static void tda9950_release(struct tda9950_priv *priv)
-{
- struct i2c_client *client = priv->client;
- int timeout = 50;
- u8 csr;
-
- /* Stop the command processor */
- tda9950_write(client, REG_CCR, 0);
-
- /* Wait up to .5s for it to signal non-busy */
- do {
- csr = tda9950_read(client, REG_CSR);
- if (!(csr & CSR_BUSY) || !--timeout)
- break;
- msleep(10);
- } while (1);
-
- /* Warn the user that their IRQ may die if it's shared. */
- if (csr & CSR_BUSY)
- dev_warn(&client->dev, "command processor failed to stop, irq%d may die (csr=0x%02x)\n",
- client->irq, csr);
-
- tda9950_glue_release(priv);
-}
-
-static int tda9950_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
- struct tda9950_priv *priv = adap->priv;
-
- if (!enable) {
- tda9950_release(priv);
- return 0;
- } else {
- return tda9950_open(priv);
- }
-}
-
-static const struct cec_adap_ops tda9950_cec_ops = {
- .adap_enable = tda9950_cec_adap_enable,
- .adap_log_addr = tda9950_cec_adap_log_addr,
- .adap_transmit = tda9950_cec_transmit,
-};
-
-/*
- * When operating as part of the TDA998x, we need to claim additional
- * resources. These two hooks permit the management of those resources.
- */
-static void tda9950_devm_glue_exit(void *data)
-{
- struct tda9950_glue *glue = data;
-
- if (glue && glue->exit)
- glue->exit(glue->data);
-}
-
-static int tda9950_devm_glue_init(struct device *dev, struct tda9950_glue *glue)
-{
- int ret;
-
- if (glue && glue->init) {
- ret = glue->init(glue->data);
- if (ret)
- return ret;
- }
-
- ret = devm_add_action(dev, tda9950_devm_glue_exit, glue);
- if (ret)
- tda9950_devm_glue_exit(glue);
-
- return ret;
-}
-
-static void tda9950_cec_del(void *data)
-{
- struct tda9950_priv *priv = data;
-
- cec_delete_adapter(priv->adap);
-}
-
-static int tda9950_probe(struct i2c_client *client)
-{
- struct tda9950_glue *glue = client->dev.platform_data;
- struct device *dev = &client->dev;
- struct tda9950_priv *priv;
- unsigned long irqflags;
- int ret;
- u8 cvr;
-
- /*
- * We must have I2C functionality: our multi-byte accesses
- * must be performed as a single contiguous transaction.
- */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev,
- "adapter does not support I2C functionality\n");
- return -ENXIO;
- }
-
- /* We must have an interrupt to be functional. */
- if (client->irq <= 0) {
- dev_err(&client->dev, "driver requires an interrupt\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->client = client;
- priv->glue = glue;
-
- i2c_set_clientdata(client, priv);
-
- /*
- * If we're part of a TDA998x, we want the class devices to be
- * associated with the HDMI Tx so we have a tight relationship
- * between the HDMI interface and the CEC interface.
- */
- priv->hdmi = dev;
- if (glue && glue->parent)
- priv->hdmi = glue->parent;
-
- priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO,
- CEC_MAX_LOG_ADDRS);
- if (IS_ERR(priv->adap))
- return PTR_ERR(priv->adap);
-
- ret = devm_add_action(dev, tda9950_cec_del, priv);
- if (ret) {
- cec_delete_adapter(priv->adap);
- return ret;
- }
-
- ret = tda9950_devm_glue_init(dev, glue);
- if (ret)
- return ret;
-
- ret = tda9950_glue_open(priv);
- if (ret)
- return ret;
-
- cvr = tda9950_read(client, REG_CVR);
-
- dev_info(&client->dev,
- "TDA9950 CEC interface, hardware version %u.%u\n",
- cvr >> 4, cvr & 15);
-
- tda9950_glue_release(priv);
-
- irqflags = IRQF_TRIGGER_FALLING;
- if (glue)
- irqflags = glue->irq_flags;
-
- ret = devm_request_threaded_irq(dev, client->irq, NULL, tda9950_irq,
- irqflags | IRQF_SHARED | IRQF_ONESHOT,
- dev_name(&client->dev), priv);
- if (ret < 0)
- return ret;
-
- priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
- priv->adap);
- if (!priv->notify)
- return -ENOMEM;
-
- ret = cec_register_adapter(priv->adap, priv->hdmi);
- if (ret < 0) {
- cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
- return ret;
- }
-
- /*
- * CEC documentation says we must not call cec_delete_adapter
- * after a successful call to cec_register_adapter().
- */
- devm_remove_action(dev, tda9950_cec_del, priv);
-
- return 0;
-}
-
-static void tda9950_remove(struct i2c_client *client)
-{
- struct tda9950_priv *priv = i2c_get_clientdata(client);
-
- cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
- cec_unregister_adapter(priv->adap);
-}
-
-static struct i2c_device_id tda9950_ids[] = {
- { "tda9950" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tda9950_ids);
-
-static struct i2c_driver tda9950_driver = {
- .probe = tda9950_probe,
- .remove = tda9950_remove,
- .driver = {
- .name = "tda9950",
- },
- .id_table = tda9950_ids,
-};
-
-module_i2c_driver(tda9950_driver);
-
-MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
-MODULE_DESCRIPTION("TDA9950/TDA998x Consumer Electronics Control Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1852e0804942..3562a02ef7ad 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -50,7 +50,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_EXPORT_FOR_TESTS if m
- select DRM_DEBUG_SELFTEST
+ select DRM_KUNIT_TEST if KUNIT
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_WERROR
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e465828d748f..4db24050edb0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
+# FIXME: Disable tracepoints on i915 for PREEMPT_RT, unfortunately
+# it's an all or nothing flag. You cannot selectively disable
+# only some tracepoints.
+subdir-ccflags-$(CONFIG_PREEMPT_RT) += -DNOTRACE
+
subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -26,31 +31,33 @@ i915-y += \
i915_ioctl.o \
i915_irq.o \
i915_mitigations.o \
+ i915_mmio_range.o \
i915_module.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_switcheroo.o \
i915_sysfs.o \
+ i915_timer_util.o \
i915_utils.o \
intel_clock_gating.o \
+ intel_cpu_info.o \
intel_device_info.o \
intel_memory_region.o \
intel_pcode.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sbi.o \
intel_step.o \
intel_uncore.o \
+ intel_uncore_trace.o \
intel_wakeref.o \
- vlv_sideband.o \
+ vlv_iosf_sb.o \
vlv_suspend.o
# core peripheral code
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o \
soc/intel_rom.o
# core library code
@@ -149,6 +156,7 @@ gem-y += \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_object.o \
+ gem/i915_gem_object_frontbuffer.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
@@ -218,24 +226,29 @@ i915-$(CONFIG_HWMON) += \
# modesetting core code
i915-y += \
display/hsw_ips.o \
- display/i9xx_plane.o \
display/i9xx_display_sr.o \
+ display/i9xx_plane.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
- display/intel_atomic_plane.o \
display/intel_audio.o \
display/intel_bios.o \
display/intel_bo.o \
display/intel_bw.o \
+ display/intel_casf.o \
display/intel_cdclk.o \
+ display/intel_cmtg.o \
display/intel_color.o \
+ display/intel_colorop.o \
+ display/intel_color_pipeline.o \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
+ display/intel_dbuf_bw.o \
display/intel_display.o \
+ display/intel_display_conversion.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
display/intel_display_params.o \
@@ -243,8 +256,10 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
+ display/intel_display_utils.o \
display/intel_display_wa.o \
display/intel_dmc.o \
display/intel_dmc_wl.o \
@@ -262,6 +277,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
+ display/intel_flipq.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
@@ -277,21 +293,28 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_panic.o \
+ display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
+ display/intel_plane.o \
display/intel_plane_initial.o \
display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
+ display/intel_sbi.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \
display/intel_tc.o \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
+ display/skl_prefill.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
- display/skl_watermark.o
+ display/skl_watermark.o \
+ display/vlv_clock.o \
+ display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -337,12 +360,14 @@ i915-y += \
display/intel_gmbus.o \
display/intel_hdmi.o \
display/intel_lspcon.o \
+ display/intel_lt_phy.o \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pfit.o \
display/intel_pps.o \
display/intel_qp_tables.o \
display/intel_sdvo.o \
+ display/intel_snps_hdmi_pll.o \
display/intel_snps_phy.o \
display/intel_tv.o \
display/intel_vdsc.o \
@@ -403,7 +428,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 493e730c685b..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -247,7 +249,7 @@ static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 160000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 534b8544e0a4..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -276,7 +278,7 @@ static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 0d5cce6051b1..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -314,7 +316,7 @@ static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 112000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 9d47f8a93e94..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,7 +26,8 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -518,13 +519,13 @@ static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
* Even if not, the detection bit of the 2501 is unreliable as
* it only works for some display types.
* It is even more unreliable as the PLL must be active for
- * allowing reading from the chiop.
+ * allowing reading from the chip.
*/
return connector_status_connected;
}
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index a8dd40c00997..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -189,7 +191,7 @@ static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index d9a0cd753a87..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
@@ -217,7 +219,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e06405a3b82d..a3ff21b2f69f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,6 +7,8 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
#include "i915_reg.h"
#include "intel_audio.h"
@@ -15,7 +17,9 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
@@ -27,7 +31,6 @@
#include "intel_hotplug.h"
#include "intel_pch_display.h"
#include "intel_pps.h"
-#include "vlv_sideband.h"
static const struct dpll g4x_dpll[] = {
{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
@@ -50,28 +53,28 @@ static const struct dpll chv_dpll[] = {
{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
};
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+const struct dpll *vlv_get_dpll(struct intel_display *display)
{
- return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
+ return display->platform.cherryview ? &chv_dpll[0] : &vlv_dpll[0];
}
static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -91,7 +94,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -128,7 +130,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ if (display->platform.ivybridge && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -139,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(display) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
@@ -147,7 +149,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
pipe_config->enhanced_framing ?
TRANS_DP_ENH_FRAMING : 0);
} else {
- if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+ if (display->platform.g4x && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -159,7 +161,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (pipe_config->enhanced_framing)
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
else
intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
@@ -179,10 +181,9 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
-static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+static void assert_edp_pll(struct intel_display *display, bool state)
{
- struct intel_display *display = &dev_priv->display;
- bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(display, DP_A) & EDP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -196,21 +197,20 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(display, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
- assert_edp_pll_disabled(dev_priv);
+ assert_edp_pll_disabled(display);
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+ intel_dp->DP &= ~EDP_PLL_FREQ_MASK;
if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_162MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_270MHZ;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -222,10 +222,10 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
- if (IS_IRONLAKE(dev_priv))
- intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
+ if (display->platform.ironlake)
+ intel_wait_for_vblank_if_active(display, !crtc->pipe);
- intel_dp->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -236,26 +236,23 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
- assert_edp_pll_enabled(dev_priv);
+ assert_edp_pll_enabled(display);
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
- intel_dp->DP &= ~DP_PLL_ENABLE;
+ intel_dp->DP &= ~EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
udelay(200);
}
-static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+static bool cpt_dp_port_selected(struct intel_display *display,
enum port port, enum pipe *pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe p;
for_each_pipe(display, p) {
@@ -276,11 +273,10 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
return false;
}
-bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct intel_display *display = &dev_priv->display;
bool ret;
u32 val;
@@ -289,14 +285,14 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
ret = val & DP_PORT_EN;
/* asserts want to know the pipe even if the port is disabled */
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
- ret &= cpt_dp_port_selected(dev_priv, port, pipe);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ if (display->platform.ivybridge && port == PORT_A)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_IVB, val);
+ else if (HAS_PCH_CPT(display) && port != PORT_A)
+ ret &= cpt_dp_port_selected(display, port, pipe);
+ else if (display->platform.cherryview)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_CHV, val);
else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK, val);
return ret;
}
@@ -304,20 +300,20 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
- ret = g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ ret = g4x_dp_port_enabled(display, intel_dp->output_reg,
encoder->port, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -341,7 +337,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
@@ -356,7 +351,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ if (HAS_PCH_CPT(display) && port != PORT_A) {
u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
@@ -389,16 +384,15 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
- if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+ pipe_config->lane_count = REG_FIELD_GET(DP_PORT_WIDTH_MASK, tmp) + 1;
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & EDP_PLL_FREQ_MASK) == EDP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -419,7 +413,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
@@ -431,17 +424,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
- } else {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
- }
- intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(display, intel_dp->output_reg);
-
intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
@@ -451,13 +433,13 @@ intel_dp_link_down(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
@@ -470,14 +452,14 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_wait_for_vblank_if_active(display, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
msleep(intel_dp->pps.panel_power_down_delay);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_port_disable(encoder, old_crtc_state);
}
@@ -522,7 +504,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
@@ -584,16 +566,10 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
intel_dp_link_down(encoder, old_crtc_state);
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void
@@ -625,6 +601,19 @@ cpt_set_link_train(struct intel_dp *intel_dp,
}
static void
+cpt_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
+static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
@@ -652,6 +641,19 @@ g4x_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(display, intel_dp->output_reg);
}
+static void
+g4x_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -680,7 +682,6 @@ static void intel_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
@@ -689,7 +690,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
return;
with_intel_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_port_enable_unlocked(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -699,13 +700,13 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_pps_vdd_off_unlocked(intel_dp, true);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
- vlv_wait_port_ready(display, dp_to_dig_port(intel_dp), lane_mask);
+ vlv_wait_port_ready(encoder, lane_mask);
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1227,10 +1228,10 @@ static int g4x_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
- if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ if (HAS_PCH_SPLIT(display) && encoder->port != PORT_A)
crtc_state->has_pch_encoder = true;
ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
@@ -1262,7 +1263,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->dev);
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
@@ -1270,7 +1270,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
intel_dp_invalidate_source_oui(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_pipe_reset(intel_dp);
intel_pps_encoder_reset(intel_dp);
@@ -1281,17 +1281,16 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-bool g4x_dp_init(struct drm_i915_private *dev_priv,
+bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return false;
devdata = intel_bios_encoder_data_lookup(display, port);
@@ -1301,12 +1300,10 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -1316,8 +1313,6 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
@@ -1335,14 +1330,14 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->suspend_complete = g4x_dp_suspend_complete;
intel_encoder->shutdown = intel_dp_encoder_shutdown;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -1357,25 +1352,28 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->audio_enable = g4x_dp_audio_enable;
intel_encoder->audio_disable = g4x_dp_audio_disable;
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ if ((display->platform.ivybridge && port == PORT_A) ||
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
dig_port->dp.set_link_train = cpt_set_link_train;
- else
+ dig_port->dp.set_idle_link_train = cpt_set_idle_link_train;
+ } else {
dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_idle_link_train = g4x_set_idle_link_train;
+ }
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_encoder->set_signal_levels = chv_set_signal_levels;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
intel_encoder->set_signal_levels = vlv_set_signal_levels;
- else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ else if (display->platform.ivybridge && port == PORT_A)
intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels;
- else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
+ else if (display->platform.sandybridge && port == PORT_A)
intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels;
else
intel_encoder->set_signal_levels = g4x_set_signal_levels;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ if (display->platform.valleyview || display->platform.cherryview ||
+ (HAS_PCH_SPLIT(display) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
@@ -1384,11 +1382,10 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
}
dig_port->dp.output_reg = output_reg;
- dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
- if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
+ if (display->platform.cherryview) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
else
@@ -1398,7 +1395,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(port);
dig_port->hpd_pulse = intel_dp_hpd_pulse;
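
Several hunks in g4x_dp.c replace open-coded mask-and-shift reads with REG_FIELD_GET(), i915's u32 wrapper around the generic FIELD_GET(). A small sketch of the difference, using a made-up register field rather than the real DP definitions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_PORT_WIDTH_MASK	GENMASK(21, 19)	/* placeholder field */
#define DEMO_PORT_WIDTH_SHIFT	19

/* Before: the shift is spelled out and must be kept in sync with the mask. */
static inline u32 demo_lane_count_old(u32 val)
{
	return ((val & DEMO_PORT_WIDTH_MASK) >> DEMO_PORT_WIDTH_SHIFT) + 1;
}

/* After: FIELD_GET() derives the shift from the mask at compile time, so
 * only the mask definition remains. */
static inline u32 demo_lane_count_new(u32 val)
{
	return FIELD_GET(DEMO_PORT_WIDTH_MASK, val) + 1;
}
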
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
index 839a251dc069..0b28951b8365 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.h
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -12,30 +12,30 @@
enum pipe;
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
#ifdef I915
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
-bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+const struct dpll *vlv_get_dpll(struct intel_display *display);
+bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
-bool g4x_dp_init(struct drm_i915_private *dev_priv,
+bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port);
#else
-static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+static inline const struct dpll *vlv_get_dpll(struct intel_display *display)
{
return NULL;
}
-static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, int port,
enum pipe *pipe)
{
return false;
}
-static inline bool g4x_dp_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, int port)
{
return false;
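The g4x_dp.h change keeps the header's existing convention: real prototypes under #ifdef I915, static inline stubs otherwise, so callers outside the display core need no ifdefs of their own. A short sketch of the same idiom under assumed names (demo_feature.h and CONFIG_DEMO_FEATURE are placeholders, not real Kconfig symbols):

/* demo_feature.h -- illustrative, not the actual i915 header */
#ifndef DEMO_FEATURE_H
#define DEMO_FEATURE_H

#include <stdbool.h>

struct demo_display;

#ifdef CONFIG_DEMO_FEATURE
bool demo_feature_init(struct demo_display *display, int port);
#else
/* Stub keeps callers ifdef-free when the feature is compiled out. */
static inline bool demo_feature_init(struct demo_display *display, int port)
{
	return false;
}
#endif

#endif /* DEMO_FEATURE_H */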
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index d1a7d0d57c6b..f6e2d1ed5639 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -5,6 +5,8 @@
* HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <drm/drm_print.h>
+
#include "g4x_hdmi.h"
#include "i915_reg.h"
#include "intel_atomic.h"
@@ -13,21 +15,20 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
-#include "intel_fdi.h"
+#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_sdvo.h"
-#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -36,7 +37,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(display) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -51,33 +52,33 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
+ ret = intel_sdvo_port_enabled(display, intel_hdmi->hdmi_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -130,17 +131,14 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display))
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
- return -EINVAL;
- }
- if (IS_G4X(i915))
+ if (display->platform.g4x)
crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
else
crtc_state->has_hdmi_sink =
@@ -152,15 +150,14 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
int dotclock;
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ tmp = intel_de_read(display, intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -184,7 +181,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+ if (!HAS_PCH_SPLIT(display) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -220,33 +217,32 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
if (!crtc_state->has_audio)
return;
- drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
+ drm_WARN_ON(display->drm, !crtc_state->has_hdmi_sink);
/* Enable audio presence detect */
- intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
+ intel_de_rmw(display, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -255,7 +251,7 @@ static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
if (!old_crtc_state->has_audio)
@@ -264,7 +260,7 @@ static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
/* Disable audio presence detect */
- intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
+ intel_de_rmw(display, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
}
static void g4x_enable_hdmi(struct intel_atomic_state *state,
@@ -280,12 +276,11 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
@@ -293,10 +288,10 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to toggle enable bit off and on
@@ -307,18 +302,18 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24 &&
pipe_config->pixel_multiplier > 1) {
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ intel_de_write(display, intel_hdmi->hdmi_reg,
temp & ~SDVO_ENABLE);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
}
@@ -327,14 +322,13 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
enum pipe pipe = crtc->pipe;
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
@@ -349,24 +343,24 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN1(pipe),
0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
- intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
}
@@ -383,32 +377,31 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround for IBX, we need to move the port
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
temp &= ~SDVO_PIPE_SEL_MASK;
temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
@@ -416,18 +409,18 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_wait_for_vblank_if_active(display, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
dig_port->set_infoframes(encoder,
@@ -480,7 +473,6 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -496,7 +488,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(display, dig_port, 0x0);
+ vlv_wait_port_ready(encoder, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -541,15 +533,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
@@ -557,7 +542,6 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
chv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -572,7 +556,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(display, dig_port, 0x0);
+ vlv_wait_port_ready(encoder, 0x0);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -611,7 +595,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_connector_list_iter conn_iter;
struct drm_connector *conn;
int ret;
@@ -620,7 +604,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- if (!IS_G4X(i915))
+ if (!display->platform.g4x)
return 0;
if (!intel_connector_needs_modeset(to_intel_atomic_state(state), connector))
@@ -634,7 +618,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
*
* See also g4x_compute_has_hdmi_sink().
*/
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -643,7 +627,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
if (!connector_is_hdmi(conn))
continue;
- drm_dbg_kms(&i915->drm, "Adding [CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Adding [CONNECTOR:%d:%s]\n",
conn->base.id, conn->name);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -668,67 +652,61 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
return ret;
}
-static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+static bool is_hdmi_port_valid(struct intel_display *display, enum port port)
{
- if (IS_G4X(i915) || IS_VALLEYVIEW(i915))
+ if (display->platform.g4x || display->platform.valleyview)
return port == PORT_B || port == PORT_C;
else
return port == PORT_B || port == PORT_C || port == PORT_D;
}
-static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+static bool assert_hdmi_port_valid(struct intel_display *display, enum port port)
{
- return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port),
+ return !drm_WARN(display->drm, !is_hdmi_port_valid(display, port),
"Platform does not support HDMI %c\n", port_name(port));
}
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- if (!assert_port_valid(dev_priv, port))
- return;
+ if (!assert_port_valid(display, port))
+ return false;
- if (!assert_hdmi_port_valid(dev_priv, port))
- return;
+ if (!assert_hdmi_port_valid(display, port))
+ return false;
devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
- drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
- return;
-
- dig_port->aux_ch = AUX_CH_NONE;
+ return false;
intel_connector = intel_connector_alloc();
- if (!intel_connector) {
- kfree(dig_port);
- return;
- }
+ if (!intel_connector)
+ goto err_connector_alloc;
intel_encoder = &dig_port->base;
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
-
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "HDMI %c", port_name(port));
+ if (drm_encoder_init(display->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port)))
+ goto err_encoder_init;
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -736,22 +714,22 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
@@ -761,9 +739,9 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
intel_encoder->port = port;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
else
@@ -772,20 +750,30 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG);
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(port);
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
- dig_port->max_lanes = 4;
intel_infoframe_init(dig_port);
- intel_hdmi_init_connector(dig_port, intel_connector);
+ if (!intel_hdmi_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(&intel_encoder->base);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+
+ return false;
}
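g4x_hdmi_init() above now reports success as a bool and unwinds partially constructed state through goto labels, releasing resources in the reverse order they were acquired instead of freeing inline at each failure point. A small, self-contained sketch of that unwind shape, using hypothetical demo_* helpers in place of the real allocation and init calls:

#include <stdbool.h>
#include <stdlib.h>

struct demo_port { int dummy; };
struct demo_connector { int dummy; };

/* Hypothetical helpers standing in for the real allocation/init calls. */
static struct demo_port *demo_port_alloc(void) { return calloc(1, sizeof(struct demo_port)); }
static struct demo_connector *demo_connector_alloc(void) { return calloc(1, sizeof(struct demo_connector)); }
static bool demo_encoder_init(struct demo_port *port) { return true; }
static void demo_encoder_cleanup(struct demo_port *port) { }
static bool demo_connector_init(struct demo_port *port, struct demo_connector *conn) { return true; }

static bool demo_output_init(void)
{
	struct demo_port *port;
	struct demo_connector *conn;

	port = demo_port_alloc();
	if (!port)
		return false;

	conn = demo_connector_alloc();
	if (!conn)
		goto err_connector_alloc;

	if (!demo_encoder_init(port))
		goto err_encoder_init;

	if (!demo_connector_init(port, conn))
		goto err_init_connector;

	return true;

	/* Unwind in reverse order of construction. */
err_init_connector:
	demo_encoder_cleanup(port);
err_encoder_init:
	free(conn);
err_connector_alloc:
	free(port);
	return false;
}

int main(void)
{
	return demo_output_init() ? 0 : 1;
}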
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index 817f55c7a3a1..039d2bdba06c 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -13,17 +13,18 @@
enum port;
struct drm_atomic_state;
struct drm_connector;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port);
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state);
#else
-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, int port)
{
+ return false;
}
static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index d02c328bf902..008d339d5c21 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -5,19 +5,20 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val;
if (!crtc_state->ips_enabled)
@@ -36,10 +37,10 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->ips.false_color)
val |= IPS_FALSE_COLOR;
- if (IS_BROADWELL(i915)) {
+ if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -55,7 +56,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read.
*/
- if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
+ if (intel_de_wait_for_set_ms(display, IPS_CTL, IPS_ENABLE, 50))
drm_err(display->drm,
"Timed out waiting for IPS enable\n");
}
@@ -64,22 +65,20 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bool need_vblank_wait = false;
if (!crtc_state->ips_enabled)
return need_vblank_wait;
- if (IS_BROADWELL(i915)) {
+ if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
+ if (intel_de_wait_for_clear_ms(display, IPS_CTL, IPS_ENABLE, 100))
drm_err(display->drm,
"Timed out waiting for IPS disable\n");
} else {
@@ -96,7 +95,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
static bool hsw_ips_need_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -114,7 +113,7 @@ static bool hsw_ips_need_disable(struct intel_atomic_state *state,
*
* Disable IPS before we program the LUT.
*/
- if (IS_HASWELL(i915) &&
+ if (display->platform.haswell &&
intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -137,7 +136,7 @@ bool hsw_ips_pre_update(struct intel_atomic_state *state,
static bool hsw_ips_need_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -155,7 +154,7 @@ static bool hsw_ips_need_enable(struct intel_atomic_state *state,
*
* Re-enable IPS after the LUT has been programmed.
*/
- if (IS_HASWELL(i915) &&
+ if (display->platform.haswell &&
intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -192,52 +191,52 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* IPS only exists on ULT machines and is tied to pipe A. */
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!display->params.enable_ips)
- return false;
-
if (crtc_state->pipe_bpp > 24)
return false;
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (IS_BROADWELL(i915) &&
- crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
- return false;
-
return true;
}
+static int _hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no IPS specific limits to worry about */
+ return 0;
+}
+
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
+ int min_cdclk;
- if (!IS_BROADWELL(i915))
+ if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
- if (!hsw_crtc_state_ips_capable(crtc_state))
+ min_cdclk = _hsw_ips_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency,
+ * if that is not enough IPS will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+ return min_cdclk;
}
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -246,6 +245,12 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
+ if (_hsw_ips_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ if (!display->params.enable_ips)
+ return 0;
+
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
@@ -259,18 +264,6 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
return 0;
- if (IS_BROADWELL(i915)) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
- return 0;
- }
-
crtc_state->ips_enabled = true;
return 0;
@@ -280,12 +273,11 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!hsw_crtc_supports_ips(crtc))
return;
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE;
} else {
/*
@@ -346,10 +338,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -363,7 +354,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
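The hsw_ips.c rework above turns the Broadwell rule "pixel rate must not exceed 95% of cdclk" into a minimum-cdclk request: the helper asks for ceil(pixel_rate * 100 / 95), and if even the platform's maximum cdclk cannot satisfy that, IPS is simply left disabled. A small sketch of the arithmetic; the kHz figures below are made-up example values, not BSpec numbers:

#include <stdio.h>

/* ceil(a / b) for positive integers, like the kernel's DIV_ROUND_UP(). */
static int div_round_up(int a, int b)
{
	return (a + b - 1) / b;
}

/* BDW: pixel_rate <= 95% of cdclk, so min cdclk = ceil(pixel_rate * 100 / 95). */
static int bdw_ips_min_cdclk(int pixel_rate_khz)
{
	return div_round_up(pixel_rate_khz * 100, 95);
}

int main(void)
{
	int pixel_rate = 533250;	/* example pixel rate in kHz */
	int max_cdclk = 540000;		/* example platform max in kHz */
	int min_cdclk = bdw_ips_min_cdclk(pixel_rate);

	/* 533250 * 100 / 95 -> 561316 kHz, above max_cdclk, so IPS stays off. */
	printf("min cdclk %d kHz, IPS %s\n", min_cdclk,
	       min_cdclk > max_cdclk ? "not used" : "possible");
	return 0;
}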
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
index f6b8333f6339..935419441709 100644
--- a/drivers/gpu/drm/i915/display/i9xx_display_sr.c
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -3,10 +3,12 @@
* Copyright © 2024 Intel Corporation
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_device.h>
+
#include "i9xx_display_sr.h"
+#include "i9xx_wm_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_gmbus.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 17a1e3801a85..51ccc6bd5f21 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -2,23 +2,28 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
/* Primary plane formats for gen <= 3 */
@@ -108,61 +113,59 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+static bool i9xx_plane_has_fbc(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- if (!HAS_FBC(dev_priv))
+ if (!HAS_FBC(display))
return false;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
+ else if (display->platform.ivybridge)
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
i9xx_plane == PLANE_C;
- else if (DISPLAY_VER(dev_priv) >= 4)
+ else if (DISPLAY_VER(display) >= 4)
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
else
return i9xx_plane == PLANE_A;
}
-static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
+static struct intel_fbc *i9xx_plane_fbc(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
- return dev_priv->display.fbc[INTEL_FBC_A];
+ if (i9xx_plane_has_fbc(display, i9xx_plane))
+ return display->fbc[INTEL_FBC_A];
else
return NULL;
}
static bool i9xx_plane_has_windowing(struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
return i9xx_plane == PLANE_B;
- else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return false;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
return i9xx_plane == PLANE_C;
else
return i9xx_plane == PLANE_B ||
i9xx_plane == PLANE_C;
}
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
u32 dspcntr;
dspcntr = DISP_ENABLE;
- if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) ||
- IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (display->platform.g4x || display->platform.ironlake ||
+ display->platform.sandybridge || display->platform.ivybridge)
dspcntr |= DISP_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -210,7 +213,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 4 &&
+ if (DISPLAY_VER(display) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISP_TILED;
@@ -225,8 +228,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x, src_y, src_w;
u32 offset;
@@ -244,12 +247,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
src_y = plane_state->uapi.src.y1 >> 16;
/* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ if (HAS_GMCH(display) && fb->format->cpp[0] == 8 && src_w > 2048) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] plane too wide (%d) for 64bpp\n",
+ plane->base.base.id, plane->base.name, src_w);
return -EINVAL;
+ }
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
plane_state, 0);
else
@@ -266,14 +273,15 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Linear surfaces seem to work just fine, even on hsw/bdw
* despite them not using the linear offset anymore.
*/
- if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ if (DISPLAY_VER(display) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
unsigned int alignment = plane->min_alignment(plane, fb, 0);
int cpp = fb->format->cpp[0];
while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (offset == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unable to find suitable display surface offset due to X-tiling\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -290,7 +298,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ if (!display->platform.haswell && !display->platform.broadwell) {
unsigned int rotation = plane_state->hw.rotation;
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -303,11 +311,11 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
}
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
- } else if (DISPLAY_VER(dev_priv) >= 4 &&
+ if (display->platform.haswell || display->platform.broadwell) {
+ drm_WARN_ON(display->drm, src_x > 8191 || src_y > 4095);
+ } else if (DISPLAY_VER(display) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED) {
- drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ drm_WARN_ON(display->drm, src_x > 4095 || src_y > 4095);
}
plane_state->view.color_plane[0].offset = offset;
@@ -328,10 +336,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- i9xx_plane_has_windowing(plane));
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
if (ret)
return ret;
@@ -346,15 +354,28 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_plane_ctl(plane_state);
return 0;
}
+static u32 i8xx_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+
+ return intel_fb_xy_to_linear(x, y, plane_state, 0);
+}
+
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ return plane_state->view.color_plane[0].offset;
+}
+
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
if (crtc_state->gamma_enable)
@@ -363,7 +384,7 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
dspcntr |= DISP_PIPE_CSC_ENABLE;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
dspcntr |= DISP_PIPE_SEL(crtc->pipe);
return dspcntr;
@@ -421,13 +442,13 @@ static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(dev_priv, DSPSTRIDE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPSTRIDE(display, i9xx_plane),
plane_state->view.color_plane[0].mapping_stride);
- if (DISPLAY_VER(dev_priv) < 4) {
+ if (DISPLAY_VER(display) < 4) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
@@ -438,9 +459,9 @@ static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
* generator but let's assume we still need to
* program whatever is there.
*/
- intel_de_write_fw(dev_priv, DSPPOS(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPPOS(display, i9xx_plane),
DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, DSPSIZE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPSIZE(display, i9xx_plane),
DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1));
}
}
@@ -450,11 +471,11 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
- u32 dspcntr, dspaddr_offset, linear_offset;
+ u32 dspcntr;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@@ -463,34 +484,27 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
crtc_state->async_flip_planes & BIT(plane->id))
dspcntr |= DISP_ASYNC_FLIP;
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (DISPLAY_VER(dev_priv) >= 4)
- dspaddr_offset = plane_state->view.color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
- if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ if (display->platform.cherryview && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- intel_de_write_fw(dev_priv, PRIMPOS(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, PRIMPOS(display, i9xx_plane),
PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, PRIMSIZE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, PRIMSIZE(display, i9xx_plane),
PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1));
- intel_de_write_fw(dev_priv,
- PRIMCNSTALPHA(dev_priv, i9xx_plane), 0);
+ intel_de_write_fw(display,
+ PRIMCNSTALPHA(display, i9xx_plane), 0);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(dev_priv, i9xx_plane),
+ if (display->platform.haswell || display->platform.broadwell) {
+ intel_de_write_fw(display, DSPOFFSET(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
- } else if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(dev_priv, i9xx_plane),
- linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(dev_priv, i9xx_plane),
+ } else if (DISPLAY_VER(display) >= 4) {
+ intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
+ intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
}
@@ -499,14 +513,12 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), plane_state->surf);
}
static void i830_plane_update_arm(struct intel_dsb *dsb,
@@ -528,7 +540,7 @@ static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 dspcntr;
@@ -544,12 +556,46 @@ static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane), 0);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), 0);
else
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane), 0);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), 0);
+}
+
+static void g4x_primary_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPSURF(display, i9xx_plane));
+ error->surflive = intel_de_read(display, DSPSURFLIVE(display, i9xx_plane));
+}
+
+static void i965_plane_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPSURF(display, i9xx_plane));
+}
+
+static void i8xx_plane_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPADDR(display, i9xx_plane));
}
static void
@@ -559,18 +605,15 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
-
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
}
static void
@@ -580,102 +623,105 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(dev_priv, DSPADDR_VLV(dev_priv, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), plane_state->surf);
}
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
+}
+
+static bool i9xx_plane_can_async_flip(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_X_TILED;
}
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
intel_wakeref_t wakeref;
@@ -688,30 +734,29 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
ret = val & DISP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 5)
+ if (DISPLAY_VER(display) >= 5)
*pipe = plane->pipe;
else
*pipe = REG_FIELD_GET(DISP_PIPE_SEL_MASK, val);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
static unsigned int
hsw_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
@@ -720,10 +765,9 @@ hsw_primary_max_stride(struct intel_plane *plane,
static unsigned int
ilk_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -735,10 +779,9 @@ ilk_primary_max_stride(struct intel_plane *plane,
unsigned int
i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -750,8 +793,8 @@ i965_plane_max_stride(struct intel_plane *plane,
static unsigned int
i915_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (modifier == I915_FORMAT_MOD_X_TILED)
return 8 * 1024;
@@ -761,8 +804,8 @@ i915_plane_max_stride(struct intel_plane *plane,
static unsigned int
i8xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (plane->i9xx_plane == PLANE_C)
return 4 * 1024;
@@ -770,16 +813,21 @@ i8xx_plane_max_stride(struct intel_plane *plane,
return 8 * 1024;
}
-static unsigned int vlv_primary_min_alignment(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane)
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
+ return 256 * 1024;
+
+ /* FIXME undocumented so not sure what's actually needed */
+ if (intel_scanout_needs_vtd_wa(display))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
@@ -793,13 +841,16 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
+ return 256 * 1024;
+
+ if (intel_scanout_needs_vtd_wa(display))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
- return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 4 * 1024;
default:
@@ -837,6 +888,7 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs i8xx_plane_funcs = {
@@ -846,10 +898,32 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
+static void i9xx_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 dspcntr;
+ u32 reg;
+
+ dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane));
+ dspcntr &= ~DISP_TILED;
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(display) >= 4) {
+ reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg);
+
+ } else {
+ reg = intel_de_read_fw(display, DSPADDR(display, i9xx_plane));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), reg);
+ }
+}
+
struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
@@ -868,20 +942,20 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
- if (HAS_FBC(dev_priv) && DISPLAY_VER(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
+ if (HAS_FBC(display) && DISPLAY_VER(display) < 4 &&
+ INTEL_NUM_PIPES(display) == 2)
plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
plane->i9xx_plane = (enum i9xx_plane_id) pipe;
plane->id = PLANE_PRIMARY;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane);
+ intel_fbc_add_plane(i9xx_plane_fbc(display, plane->i9xx_plane), plane);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
formats = vlv_primary_formats;
num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (DISPLAY_VER(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
/*
* WaFP16GammaEnabling:ivb
* "Workaround : When using the 64-bit format, the plane
@@ -895,7 +969,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* planes, so we choose not to expose fp16 on IVB primary
* planes. HSW primary planes no longer have this problem.
*/
- if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ivybridge) {
formats = ivb_primary_formats;
num_formats = ARRAY_SIZE(ivb_primary_formats);
} else {
@@ -907,44 +981,48 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i8xx_primary_formats);
}
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
plane_funcs = &i965_plane_funcs;
else
plane_funcs = &i8xx_plane_funcs;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ else if (display->platform.broadwell || display->platform.haswell)
plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
+ else if (display->platform.ivybridge)
plane->min_cdclk = ivb_plane_min_cdclk;
else
plane->min_cdclk = i9xx_plane_min_cdclk;
- if (HAS_GMCH(dev_priv)) {
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (HAS_GMCH(display)) {
+ if (DISPLAY_VER(display) >= 4)
plane->max_stride = i965_plane_max_stride;
- else if (DISPLAY_VER(dev_priv) == 3)
+ else if (DISPLAY_VER(display) == 3)
plane->max_stride = i915_plane_max_stride;
else
plane->max_stride = i8xx_plane_max_stride;
} else {
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
plane->max_stride = hsw_primary_max_stride;
else
plane->max_stride = ilk_primary_max_stride;
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_alignment = vlv_primary_min_alignment;
- else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
+ plane->min_alignment = vlv_plane_min_alignment;
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
plane->min_alignment = g4x_primary_min_alignment;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
plane->min_alignment = i965_plane_min_alignment;
else
plane->min_alignment = i9xx_plane_min_alignment;
- if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
+ /* FIXME undocumented for VLV/CHV so not sure what's actually needed */
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = 128;
+
+ if (display->platform.i830 || display->platform.i845g) {
plane->update_arm = i830_plane_update_arm;
} else {
plane->update_noarm = i9xx_plane_update_noarm;
@@ -954,36 +1032,56 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->async_flip = vlv_primary_async_flip;
- plane->enable_flip_done = vlv_primary_enable_flip_done;
- plane->disable_flip_done = vlv_primary_disable_flip_done;
- } else if (IS_BROADWELL(dev_priv)) {
- plane->need_async_flip_toggle_wa = true;
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = bdw_primary_enable_flip_done;
- plane->disable_flip_done = bdw_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ivb_primary_enable_flip_done;
- plane->disable_flip_done = ivb_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ilk_primary_enable_flip_done;
- plane->disable_flip_done = ilk_primary_disable_flip_done;
+ if (DISPLAY_VER(display) >= 4)
+ plane->surf_offset = i965_plane_surf_offset;
+ else
+ plane->surf_offset = i8xx_plane_surf_offset;
+
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ plane->capture_error = g4x_primary_capture_error;
+ else if (DISPLAY_VER(display) >= 4)
+ plane->capture_error = i965_plane_capture_error;
+ else
+ plane->capture_error = i8xx_plane_capture_error;
+
+ if (HAS_ASYNC_FLIPS(display)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (display->platform.broadwell) {
+ plane->need_async_flip_toggle_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(display) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(display) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ }
}
- modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+ plane->disable_tiling = i9xx_disable_tiling;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
+
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
modifiers,
@@ -996,18 +1094,18 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (display->platform.cherryview && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
- } else if (DISPLAY_VER(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
} else {
supported_rotations = DRM_MODE_ROTATE_0;
}
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
@@ -1062,8 +1160,7 @@ void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
enum pipe pipe;
@@ -1076,74 +1173,74 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- drm_WARN_ON(dev, pipe != crtc->pipe);
+ drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
}
fb = &intel_fb->base;
- fb->dev = dev;
+ fb->dev = display->drm;
- val = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
- if (DISPLAY_VER(dev_priv) >= 4) {
- if (val & DISP_TILED) {
- plane_config->tiling = I915_TILING_X;
+ if (DISPLAY_VER(display) >= 4) {
+ if (val & DISP_TILED)
fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
if (val & DISP_ROTATE_180)
plane_config->rotation = DRM_MODE_ROTATE_180;
}
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
- val & DISP_MIRROR)
+ if (display->platform.cherryview &&
+ pipe == PIPE_B && val & DISP_MIRROR)
plane_config->rotation |= DRM_MODE_REFLECT_X;
pixel_format = val & DISP_FORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = intel_de_read(dev_priv,
- DSPOFFSET(dev_priv, i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(dev_priv, i9xx_plane)) & DISP_ADDR_MASK;
- } else if (DISPLAY_VER(dev_priv) >= 4) {
- if (plane_config->tiling)
- offset = intel_de_read(dev_priv,
- DSPTILEOFF(dev_priv, i9xx_plane));
+
+ fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier);
+
+ if (display->platform.haswell || display->platform.broadwell) {
+ offset = intel_de_read(display,
+ DSPOFFSET(display, i9xx_plane));
+ base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
+ } else if (DISPLAY_VER(display) >= 4) {
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+ offset = intel_de_read(display,
+ DSPTILEOFF(display, i9xx_plane));
else
- offset = intel_de_read(dev_priv,
- DSPLINOFF(dev_priv, i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(dev_priv, i9xx_plane)) & DISP_ADDR_MASK;
+ offset = intel_de_read(display,
+ DSPLINOFF(display, i9xx_plane));
+ base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
} else {
offset = 0;
- base = intel_de_read(dev_priv, DSPADDR(dev_priv, i9xx_plane));
+ base = intel_de_read(display, DSPADDR(display, i9xx_plane));
}
plane_config->base = base;
- drm_WARN_ON(&dev_priv->drm, offset != 0);
+ drm_WARN_ON(display->drm, offset != 0);
- val = intel_de_read(dev_priv, PIPESRC(dev_priv, pipe));
+ val = intel_de_read(display, PIPESRC(display, pipe));
fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
- val = intel_de_read(dev_priv, DSPSTRIDE(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPSTRIDE(display, i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][PLANE:%d:%s] with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ fb->width, fb->height, fb->format->cpp[0] * 8,
+ base, fb->pitches[0], plane_config->size);
plane_config->fb = intel_fb;
}
@@ -1151,29 +1248,26 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv, DSPSURF(dev_priv, i9xx_plane), base);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write(dev_priv, DSPADDR(dev_priv, i9xx_plane), base);
+ intel_de_write(display, DSPADDR(display, i9xx_plane), plane_state->surf);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index 0ca12d1e6839..ec78bf4dd35e 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -9,20 +9,26 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
+struct drm_format_info;
+struct drm_framebuffer;
struct intel_crtc;
+struct intel_display;
struct intel_initial_plane_config;
struct intel_plane;
struct intel_plane_state;
#ifdef I915
unsigned int i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation);
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+				     int color_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state);
struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+intel_primary_plane_create(struct intel_display *display, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
@@ -40,7 +46,7 @@ static inline int i9xx_check_plane_surface(struct intel_plane_state *plane_state
return 0;
}
static inline struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, int pipe)
+intel_primary_plane_create(struct intel_display *display, int pipe)
{
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index e3b13886177a..01f3803fa09f 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -3,12 +3,21 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include <drm/drm_print.h>
+
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
+#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
#include "intel_mchbar_regs.h"
@@ -80,117 +89,122 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+ bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3;
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
- i915->is_ddr3 == latency->is_ddr3 &&
- DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq &&
- DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq)
+ is_ddr3 == latency->is_ddr3 &&
+ DIV_ROUND_CLOSEST(dram_info->fsb_freq, 1000) == latency->fsb_freq &&
+ DIV_ROUND_CLOSEST(dram_info->mem_freq, 1000) == latency->mem_freq)
return latency;
}
- drm_dbg_kms(&i915->drm,
- "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
- i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
+ drm_dbg_kms(display->drm,
+ "Could not find CxSR latency for %s, FSB %u kHz, MEM %u kHz\n",
+ intel_dram_type_str(dram_info->type),
+ dram_info->fsb_freq, dram_info->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
u32 val;
+ int ret;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if (enable)
val &= ~FORCE_DDR_HIGH_FREQ;
else
val |= FORCE_DDR_HIGH_FREQ;
val &= ~FORCE_DDR_LOW_FREQ;
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret)
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -199,7 +213,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -234,17 +248,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
@@ -270,8 +284,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -279,22 +293,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -309,26 +321,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -336,22 +348,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -445,7 +457,7 @@ static const struct intel_watermark_params i845_wm_info = {
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the short term drain rate
@@ -492,7 +504,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the long term drain rate
@@ -536,7 +548,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -554,7 +566,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -572,10 +584,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@@ -625,11 +637,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -640,21 +652,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -662,47 +674,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
@@ -793,53 +804,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -847,72 +856,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -961,11 +970,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1016,10 +1025,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1032,13 +1041,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1055,8 +1064,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1068,7 +1077,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1108,7 +1117,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1116,7 +1125,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1136,9 +1145,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1280,7 +1289,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1310,7 +1319,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1328,23 +1337,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1375,7 +1384,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1385,7 +1394,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1407,7 +1416,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1419,23 +1428,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
@@ -1443,30 +1452,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1485,18 +1494,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
@@ -1504,13 +1513,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1531,7 +1540,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -1545,8 +1554,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1561,7 +1570,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
/*
* When enabling sprite0 after sprite1 has already been enabled
* we tend to get an underrun unless sprite0 already has some
- * FIFO space allcoated. Hence we always allocate at least one
+ * FIFO space allocated. Hence we always allocate at least one
* cacheline for sprite0 whenever sprite1 is enabled.
*
* All other plane enable sequences appear immune to this problem.
@@ -1615,11 +1624,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -1630,9 +1639,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1658,10 +1667,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1674,8 +1683,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1685,7 +1694,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1702,7 +1711,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1733,8 +1742,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1744,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1754,7 +1763,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1854,6 +1863,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1870,8 +1880,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1888,8 +1898,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1901,12 +1911,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1918,12 +1928,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1935,14 +1945,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
@@ -2017,16 +2027,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2045,7 +2055,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2060,35 +2070,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
@@ -2096,33 +2106,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2130,7 +2140,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2151,7 +2161,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2166,7 +2176,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2174,39 +2184,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
#undef FW_WM
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2215,7 +2224,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2225,29 +2234,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2256,25 +2265,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2283,17 +2292,16 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
- struct drm_gem_object *obj;
-
- obj = intel_fb_bo(crtc->base.primary->state->fb);
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
/* self-refresh seems busted with untiled */
- if (!intel_bo_is_tiled(obj))
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
crtc = NULL;
}
@@ -2303,10 +2311,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2319,7 +2327,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2327,20 +2335,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2351,34 +2359,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2533,24 +2541,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2562,30 +2570,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2593,14 +2601,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2616,11 +2624,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2629,32 +2637,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2678,15 +2686,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2699,7 +2707,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2708,9 +2716,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2734,11 +2742,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2751,11 +2760,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2765,11 +2775,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2779,24 +2790,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2804,13 +2812,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2818,21 +2826,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2845,50 +2853,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2900,11 +2908,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2915,7 +2923,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2942,10 +2950,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2953,18 +2961,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2972,7 +2980,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2989,7 +2997,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3014,7 +3022,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3035,7 +3043,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3067,7 +3075,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3075,7 +3083,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3100,31 +3108,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3140,8 +3148,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3157,16 +3165,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3190,14 +3198,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3208,19 +3216,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3235,13 +3243,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3267,7 +3275,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3275,7 +3283,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3313,25 +3321,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3347,73 +3355,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3425,7 +3433,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3433,18 +3441,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3452,50 +3460,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3522,7 +3529,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3571,7 +3578,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3582,14 +3589,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3605,14 +3612,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3642,7 +3649,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3656,18 +3663,18 @@ fail:
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3675,21 +3682,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3701,34 +3708,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3740,11 +3747,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3758,16 +3765,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3832,7 +3839,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3840,26 +3847,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3872,7 +3878,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3883,45 +3889,40 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
-}
-
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
+ int ret;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
+ if (display->platform.cherryview) {
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -3934,26 +3935,28 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
* HIGH/LOW bits so that we don't actually change
* the current state.
*/
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret) {
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3993,7 +3996,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -4002,20 +4005,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4028,7 +4030,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4036,39 +4038,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4076,37 +4072,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4121,14 +4117,16 @@ static const struct intel_wm_funcs vlv_wm_funcs = {
.initial_watermarks = vlv_initial_watermarks,
.optimize_watermarks = vlv_optimize_watermarks,
.atomic_update_watermarks = vlv_atomic_update_fifo,
- .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+ .get_hw_state = vlv_wm_get_hw_state,
+ .sanitize = vlv_wm_sanitize,
};
static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_watermarks = g4x_compute_watermarks,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
- .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+ .get_hw_state = g4x_wm_get_hw_state,
+ .sanitize = g4x_wm_sanitize,
};
static const struct intel_wm_funcs pnv_wm_funcs = {
@@ -4154,39 +4152,39 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ if (HAS_PCH_SPLIT(display)) {
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm_regs.h b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
new file mode 100644
index 000000000000..d68d22235cf2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __I9XX_WM_REGS_H__
+#define __I9XX_WM_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
+#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
+#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
+
+/* pnv/gen4/g4x/vlv/chv */
+#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff << 23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f << 16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
+#define DSPFW_PLANEA_SHIFT 0
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
+#define DSPFW_FBC_SR_SHIFT 28
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
+#define DSPFW_FBC_HPLL_SR_SHIFT 24
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
+#define DSPFW_SPRITEB_SHIFT (16)
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_CURSORA_MASK (0x3f << 8)
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
+#define DSPFW_SPRITEA_SHIFT 0
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
+#define DSPFW_HPLL_SR_SHIFT 0
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
+
+/* vlv/chv */
+#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW_SPRITEB_WM1_SHIFT 16
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORA_WM1_SHIFT 8
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
+#define DSPFW_SPRITEA_WM1_SHIFT 0
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
+#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW_PLANEB_WM1_SHIFT 24
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEA_WM1_SHIFT 16
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORB_WM1_SHIFT 8
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSOR_SR_WM1_SHIFT 0
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
+#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW_SR_WM1_SHIFT 0
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
+#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW_SPRITED_WM1_SHIFT 24
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITED_SHIFT 16
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEC_WM1_SHIFT 8
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEC_SHIFT 0
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
+#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW_SPRITEF_WM1_SHIFT 24
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITEF_SHIFT 16
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEE_WM1_SHIFT 8
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEE_SHIFT 0
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW_PLANEC_WM1_SHIFT 24
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEC_SHIFT 16
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
+#define DSPFW_CURSORC_WM1_SHIFT 8
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
+#define DSPFW_CURSORC_SHIFT 0
+#define DSPFW_CURSORC_MASK (0x3f << 0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
+#define DSPFW_SR_HI_SHIFT 24
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_HI_SHIFT 23
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_HI_SHIFT 22
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_HI_SHIFT 21
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_HI_SHIFT 20
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_HI_SHIFT 16
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_HI_SHIFT 12
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_HI_SHIFT 8
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_HI_SHIFT 4
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_HI_SHIFT 0
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
+#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
+#define DSPFW_SR_WM1_HI_SHIFT 24
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_WM1_HI_SHIFT 21
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_WM1_HI_SHIFT 20
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_WM1_HI_SHIFT 12
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_WM1_HI_SHIFT 0
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
+
+/* drain latency register values*/
+#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_SHIFT 24
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
+#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
+#define DRAIN_LATENCY_MASK 0x7f
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define VALLEYVIEW_FIFO_SIZE 255
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define VALLEYVIEW_MAX_WM 0xff
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark register on Ironlake */
+#define _WM0_PIPEA_ILK 0x45100
+#define _WM0_PIPEB_ILK 0x45104
+#define _WM0_PIPEC_IVB 0x45200
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
+#define WM1_LP_ILK _MMIO(0x45108)
+#define WM2_LP_ILK _MMIO(0x4510c)
+#define WM3_LP_ILK _MMIO(0x45110)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
+#define WM1S_LP_ILK _MMIO(0x45120)
+#define WM2S_LP_IVB _MMIO(0x45124)
+#define WM3S_LP_IVB _MMIO(0x45128)
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
+
+#define WM_MISC _MMIO(0x45260)
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG _MMIO(0x45280)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
+
+#endif /* __I9XX_WM_REGS_H__ */
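The new i9xx_wm_regs.h keeps the long-standing shift/mask pairs for the DSPFW registers, which readout helpers such as g4x_read_wm_values() above unpack via the _FW_WM()/_FW_WM_VLV() macros. A minimal stand-alone sketch of that extraction pattern, using the CURSORB and PLANEA fields and a made-up register value (not part of the patch):

/* Hypothetical user-space sketch of the DSPFW1 field extraction pattern;
 * the mask/shift values are copied from i9xx_wm_regs.h, the register value
 * below is invented for illustration. */
#include <stdio.h>
#include <stdint.h>

#define DSPFW_CURSORB_SHIFT	16
#define DSPFW_CURSORB_MASK	(0x3f << 16)
#define DSPFW_PLANEA_SHIFT	0
#define DSPFW_PLANEA_MASK	(0x7f << 0)

/* mirrors the _FW_WM(value, plane) helper used in i9xx_wm.c */
#define FW_WM(value, field) \
	(((value) & DSPFW_##field##_MASK) >> DSPFW_##field##_SHIFT)

int main(void)
{
	uint32_t dspfw1 = (0x10u << DSPFW_CURSORB_SHIFT) |
			  (0x20u << DSPFW_PLANEA_SHIFT);

	printf("cursor B wm = %u, plane A wm = %u\n",
	       FW_WM(dspfw1, CURSORB), FW_WM(dspfw1, PLANEA));
	return 0;
}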
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 74ab3d1a1622..9230792960f2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -25,10 +25,13 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -43,6 +46,8 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -69,8 +74,12 @@ static int payload_credits_available(struct intel_display *display,
static bool wait_for_header_credits(struct intel_display *display,
enum transcoder dsi_trans, int hdr_credit)
{
- if (wait_for_us(header_credits_available(display, dsi_trans) >=
- hdr_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = header_credits_available(display, dsi_trans),
+ available >= hdr_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI header credits not released\n");
return false;
}
@@ -81,8 +90,12 @@ static bool wait_for_header_credits(struct intel_display *display,
static bool wait_for_payload_credits(struct intel_display *display,
enum transcoder dsi_trans, int payld_credit)
{
- if (wait_for_us(payload_credits_available(display, dsi_trans) >=
- payld_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = payload_credits_available(display, dsi_trans),
+ available >= payld_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI payload credits not released\n");
return false;
}
@@ -134,8 +147,11 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LPTX_IN_PROGRESS), 20))
+
+ ret = intel_de_wait_for_clear_us(display,
+ DSI_LP_MSG(dsi_trans),
+ LPTX_IN_PROGRESS, 20);
+ if (ret)
drm_err(display->drm, "LPTX bit not cleared\n");
}
}
@@ -190,12 +206,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
else
tmp &= ~PAYLOAD_PRESENT;
- tmp &= ~VBLANK_FENCE;
+ tmp &= ~(VBLANK_FENCE | LP_DATA_TRANSFER | PIPELINE_FLUSH);
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
else
- tmp &= ~LP_DATA_TRANSFER;
+ tmp |= PIPELINE_FLUSH;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
@@ -242,7 +258,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
- * table in BSPEC under DDI buffer programing
+ * table in BSPEC under DDI buffer programming.
*/
mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
@@ -344,7 +360,6 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
int afe_clk_khz;
@@ -353,7 +368,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
afe_clk_khz = afe_clk(encoder, crtc_state);
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_s || display->platform.alderlake_p) {
theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK);
act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2);
esc_clk_div_m = act_word_clk * 8;
@@ -374,7 +389,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port));
}
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_s || display->platform.alderlake_p) {
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_write(display, ADL_MIPIO_DW(port, 8),
esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
@@ -386,13 +401,12 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi)
{
struct intel_display *display = to_intel_display(&intel_dsi->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
- intel_display_power_get(dev_priv,
+ intel_display_power_get(display,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_IO_A :
POWER_DOMAIN_PORT_DDI_IO_B);
@@ -414,19 +428,18 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
for_each_dsi_phy(phy, intel_dsi->phys)
- intel_combo_phy_power_up_lanes(dev_priv, phy, true,
+ intel_combo_phy_power_up_lanes(display, phy, true,
intel_dsi->lane_count, false);
}
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
@@ -451,7 +464,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+ if (display->platform.jasperlake || display->platform.elkhartlake ||
(DISPLAY_VER(display) >= 12)) {
intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy),
LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
@@ -516,13 +529,14 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 500))
+ ret = intel_de_wait_for_clear_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 500);
+ if (ret)
drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
}
@@ -533,7 +547,6 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum phy phy;
@@ -563,7 +576,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
}
}
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
for_each_dsi_phy(phy, intel_dsi->phys)
intel_de_rmw(display, ICL_DPHY_CHKN(phy),
0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
@@ -660,7 +673,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy;
u32 val;
@@ -808,8 +821,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select data lane width */
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, dsi_trans));
- tmp &= ~DDI_PORT_WIDTH_MASK;
- tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+ tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
+ tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
@@ -839,9 +852,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
+ int ret;
+
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
+
+ ret = intel_de_wait_for_set_us(display,
+ DSI_TRANS_FUNC_CONF(dsi_trans),
+ LINK_READY, 2500);
+ if (ret)
drm_err(display->drm, "DSI link not ready\n");
}
}
@@ -960,7 +978,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/*
- * FIXME: Programing this by assuming progressive mode, since
+ * FIXME: Programming this by assuming progressive mode, since
* non-interlaced info from VBT is not saved inside
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
@@ -1029,8 +1047,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(display->drm,
"DSI transcoder not enabled\n");
}
@@ -1278,6 +1296,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+ intel_panel_prepare(crtc_state, conn_state);
+
intel_crtc_vblank_on(crtc_state);
}
@@ -1296,8 +1316,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(display->drm,
"DSI trancoder not disabled\n");
}
@@ -1320,6 +1340,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum port port;
enum transcoder dsi_trans;
u32 tmp;
+ int ret;
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
@@ -1336,9 +1357,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
tmp &= ~LINK_ULPS_TYPE_LP11;
intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
- 10))
+ ret = intel_de_wait_for_set_us(display, DSI_LP_MSG(dsi_trans),
+ LINK_IN_ULPS, 10);
+ if (ret)
drm_err(display->drm, "DSI link not in ULPS\n");
}
@@ -1366,14 +1387,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 8))
+ ret = intel_de_wait_for_set_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 8);
+
+ if (ret)
drm_err(display->drm,
"DDI port:%c buffer not idle\n",
port_name(port));
@@ -1384,7 +1407,6 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1392,7 +1414,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_IO_A :
POWER_DOMAIN_PORT_DDI_IO_B,
@@ -1412,6 +1434,8 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_panel_unprepare(old_conn_state);
+
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -1459,12 +1483,12 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
}
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -1628,7 +1652,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- crtc_state->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(crtc_state);
return 0;
}
@@ -1651,7 +1675,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
@@ -1696,7 +1720,6 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
@@ -1704,7 +1727,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
bool ret = false;
u32 tmp;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
@@ -1735,7 +1758,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
ret = tmp & TRANSCONF_ENABLE;
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -1831,107 +1854,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+ * The clock and data lane prepare timing parameters are expressed in
+ * units of 1/4 escape clocks, and all the other timing parameters in
+ * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
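The rewritten icl_dphy_param_init() converts the VBT D-PHY timings directly into the units the CLK_*/HS_* override fields take: prepare times in quarters of an escape clock, the remaining timings in whole escape clocks, each clamped to its field maximum. A stand-alone sketch of that conversion, assuming a 50 ns LP-TX escape clock period and made-up VBT values:

/* Hypothetical user-space sketch of the escape-clock conversion done in
 * icl_dphy_param_init(); tlpx_ns and the VBT timings below are assumed
 * values, only the arithmetic mirrors the patch. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tlpx_ns = 50;		/* assumed escape clock period */
	unsigned int tclk_prepare_ns = 60;	/* made-up VBT tclk_prepare */
	unsigned int ths_exit_ns = 120;		/* made-up VBT ths_exit */

	/* prepare time in 1/4 escape clocks, field maximum 7 */
	unsigned int tclk_prepare_esc_clk =
		DIV_ROUND_UP(tclk_prepare_ns * 4, tlpx_ns);
	if (tclk_prepare_esc_clk > 7)
		tclk_prepare_esc_clk = 7;

	/* exit time in whole escape clocks, field maximum 7 */
	unsigned int ths_exit_esc_clk = DIV_ROUND_UP(ths_exit_ns, tlpx_ns);
	if (ths_exit_esc_clk > 7)
		ths_exit_esc_clk = 7;

	/* prints 5 quarter escape clocks and 3 escape clocks */
	printf("tclk_prepare = %u quarter esc clocks, ths_exit = %u esc clocks\n",
	       tclk_prepare_esc_clk, ths_exit_esc_clk);
	return 0;
}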
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
index d4845ac65acc..b601b7632339 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -272,6 +272,7 @@
#define PAYLOAD_PRESENT (1 << 31)
#define LP_DATA_TRANSFER (1 << 30)
#define VBLANK_FENCE (1 << 29)
+#define PIPELINE_FLUSH (1 << 28)
#define PARAM_WC_MASK (0xffff << 8)
#define PARAM_WC_LOWER_SHIFT 8
#define PARAM_WC_UPPER_SHIFT 16
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index c3b29a331d72..68c01932f7b4 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,9 +9,12 @@
#include <linux/acpi.h>
#include <acpi/video.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_acpi.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 55f3ae1e68c9..6372f533f65b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -5,14 +5,25 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_psr.h"
#include "intel_psr_regs.h"
+#define SILENCE_PERIOD_MIN_TIME 80
+#define SILENCE_PERIOD_MAX_TIME 180
+#define SILENCE_PERIOD_TIME (SILENCE_PERIOD_MIN_TIME + \
+ (SILENCE_PERIOD_MAX_TIME - \
+ SILENCE_PERIOD_MIN_TIME) / 2)
+
+#define LFPS_CYCLE_COUNT 10
+
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
{
return intel_dp->alpm_dpcd & DP_ALPM_CAP;
@@ -23,7 +34,14 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_psr_needs_alpm_aux_less(intel_dp, crtc_state) ||
+ (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp));
+}
+
+void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -31,74 +49,41 @@ void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
+ mutex_init(&intel_dp->alpm.lock);
}
-/*
- * See Bspec: 71632 for the table
- *
- * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
- *
- * Half cycle duration:
- *
- * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
- * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
- *
- * Link rates 5.4 - 8.1
- * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
- * LFPS Period chosen is the mid-point of the min:max values from the table
- * FLOOR( LFPS Period in Symbol clocks /
- * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
- */
-static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
- int *silence_period,
- int *lfps_half_cycle)
+static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
{
- switch (link_rate) {
- case 162000:
- *silence_period = 20;
- *lfps_half_cycle = 5;
- break;
- case 216000:
- *silence_period = 27;
- *lfps_half_cycle = 7;
- break;
- case 243000:
- *silence_period = 31;
- *lfps_half_cycle = 8;
- break;
- case 270000:
- *silence_period = 34;
- *lfps_half_cycle = 9;
- break;
- case 324000:
- *silence_period = 41;
- *lfps_half_cycle = 11;
- break;
- case 432000:
- *silence_period = 56;
- *lfps_half_cycle = 15;
- break;
- case 540000:
- *silence_period = 69;
- *lfps_half_cycle = 12;
- break;
- case 648000:
- *silence_period = 84;
- *lfps_half_cycle = 15;
- break;
- case 675000:
- *silence_period = 87;
- *lfps_half_cycle = 15;
- break;
- case 810000:
- *silence_period = 104;
- *lfps_half_cycle = 19;
- break;
- default:
- *silence_period = *lfps_half_cycle = -1;
- return false;
+ return SILENCE_PERIOD_TIME * intel_dp_link_symbol_clock(crtc_state->port_clock) /
+ 1000 / 1000;
+}
+
+static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
+{
+ if (crtc_state->port_clock < 540000) {
+ *min = 65 * LFPS_CYCLE_COUNT;
+ *max = 75 * LFPS_CYCLE_COUNT;
+ } else {
+ *min = 140;
+ *max = 800;
}
- return true;
+}
+
+static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
+{
+ int tlfps_cycle_min, tlfps_cycle_max;
+
+ get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
+
+ return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
+}
+
+static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
+{
+ return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 /
+ 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -120,40 +105,36 @@ static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
* tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
* TPS4 Length = 252 Symbols
*/
-static int _lnl_compute_aux_less_wake_time(int port_clock)
+static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_state)
{
int tphy2_p2_to_p0 = 12 * 1000;
- int tlfps_period_max = 800;
- int tsilence_max = 180;
int t1 = 50 * 1000;
int tps4 = 252;
/* port_clock is link rate in 10kbit/s units */
- int tml_phy_lock = 1000 * 1000 * tps4 / port_clock;
+ int tml_phy_lock = 1000 * 1000 * tps4 / crtc_state->port_clock;
int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
int t2 = num_ml_phy_lock * tml_phy_lock;
int tcds = 1 * t2;
- return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
- t1 + tcds, 1000);
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + get_lfps_cycle_time(crtc_state) +
+ SILENCE_PERIOD_TIME + t1 + tcds, 1000);
}
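With the lookup table in _lnl_get_silence_period_and_lfps_half_cycle() gone, the silence period and LFPS half cycle are now derived from the mid-points of the tSilence and tLFPS_Cycle windows. A rough stand-alone sketch of that arithmetic for an assumed 2.7 Gbps 8b/10b link (port_clock = 270000, where the symbol clock in kHz equals port_clock):

/* Hypothetical user-space sketch of get_silence_period_symbols() and
 * get_lfps_half_cycle_clocks(); the 270000 link rate and the 8b/10b
 * symbol clock assumption are illustrative, not taken from the patch. */
#include <stdio.h>

#define SILENCE_PERIOD_MIN_TIME	80
#define SILENCE_PERIOD_MAX_TIME	180
#define SILENCE_PERIOD_TIME	(SILENCE_PERIOD_MIN_TIME + \
				 (SILENCE_PERIOD_MAX_TIME - \
				  SILENCE_PERIOD_MIN_TIME) / 2)
#define LFPS_CYCLE_COUNT	10

int main(void)
{
	int port_clock = 270000;		/* 2.7 Gbps, in 10 kbit/s units */
	int symbol_clock_khz = port_clock;	/* 8b/10b: bit rate / 10 */

	/* tSilence mid-point (130 ns) expressed in link symbols */
	int silence_period = SILENCE_PERIOD_TIME * symbol_clock_khz / 1000 / 1000;

	/* tLFPS_Cycle mid-point for rates below 5.4 GHz: 65..75 ns per cycle */
	int lfps_cycle_time = 65 * LFPS_CYCLE_COUNT +
			      (75 * LFPS_CYCLE_COUNT - 65 * LFPS_CYCLE_COUNT) / 2;
	int lfps_half_cycle = lfps_cycle_time * port_clock / 1000 / 1000 /
			      (2 * LFPS_CYCLE_COUNT);

	/* prints 35 and 9; the removed table listed 34 and 9 for this rate */
	printf("silence_period = %d symbols, lfps_half_cycle = %d\n",
	       silence_period, lfps_half_cycle);
	return 0;
}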
static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
lfps_half_cycle;
aux_less_wake_time =
- _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ _lnl_compute_aux_less_wake_time(crtc_state);
aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
aux_less_wake_time);
+ silence_period = get_silence_period_symbols(crtc_state);
- if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
- &silence_period,
- &lfps_half_cycle))
- return false;
+ lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
@@ -163,15 +144,15 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
- intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
- intel_dp->alpm_parameters.silence_period_sym_clocks = silence_period;
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+ crtc_state->alpm_state.aux_less_wake_lines = aux_less_wake_lines;
+ crtc_state->alpm_state.silence_period_sym_clocks = silence_period;
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms = lfps_half_cycle;
return true;
}
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
@@ -192,7 +173,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
check_entry_lines = 15;
- intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
+ crtc_state->alpm_state.check_entry_lines = check_entry_lines;
return true;
}
@@ -223,7 +204,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
}
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
@@ -261,8 +242,8 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
+ crtc_state->alpm_state.io_wake_lines = max(io_wake_lines, 7);
+ crtc_state->alpm_state.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -276,6 +257,14 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
+ if (intel_dp->alpm.lobf_disable_debug) {
+ drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
+ return;
+ }
+
+ if (intel_dp->alpm.sink_alpm_error)
+ return;
+
if (!intel_dp_is_edp(intel_dp))
return;
@@ -288,6 +277,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
+ if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
+ crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ return;
+
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
intel_alpm_aux_less_wake_supported(intel_dp)))
return;
@@ -300,9 +293,9 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_vdisplay - context_latency;
first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = intel_dp->alpm_parameters.io_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
else
- waketime_in_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
crtc_state->has_lobf = (context_latency + guardband) >
(first_sdp_position + waketime_in_lines);
@@ -313,58 +306,173 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(display) < 20 ||
- (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
+ !crtc_state->has_lobf))
return;
+ mutex_lock(&intel_dp->alpm.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) ||
- (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
- ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
-
- intel_de_write(display,
- PORT_ALPM_CTL(port),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
- PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
- PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks));
-
- intel_de_write(display,
- PORT_ALPM_LFPS_CTL(port),
- PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
- PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms));
+ ALPM_CTL_AUX_LESS_WAKE_TIME(crtc_state->alpm_state.aux_less_wake_lines);
+
+ if (intel_dp->as_sdp_supported) {
+ u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP)
+ pr_alpm_ctl |= PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU;
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR))
+ pr_alpm_ctl |= PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE;
+
+ intel_de_write(display, PR_ALPM_CTL(display, cpu_transcoder),
+ pr_alpm_ctl);
+ }
+
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(crtc_state->alpm_state.fast_wake_lines);
}
- if (crtc_state->has_lobf)
+ if (crtc_state->has_lobf) {
alpm_ctl |= ALPM_CTL_LOBF_ENABLE;
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
+ }
- alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(crtc_state->alpm_state.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
+ mutex_unlock(&intel_dp->alpm.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
+ intel_dp->alpm.transcoder = crtc_state->cpu_transcoder;
+}
+
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 alpm_ctl_val = 0, lfps_ctl_val = 0;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
+ alpm_ctl_val = PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ crtc_state->alpm_state.silence_period_sym_clocks);
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms);
+ }
+
+ intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
+
+ intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
+}
+
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (old_crtc_state->has_lobf) {
+ mutex_lock(&intel_dp->alpm.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm.lock);
+ }
+ }
+}
+
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val;
+
+ if (!intel_psr_needs_alpm(intel_dp, crtc_state) && !crtc_state->has_lobf)
+ return;
+
+ val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
+
+ if (crtc_state->has_panel_replay || (crtc_state->has_lobf &&
+ intel_alpm_aux_less_wake_supported(intel_dp)))
+ val |= DP_ALPM_MODE_AUX_LESS;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
+}
+
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (crtc_state->has_psr || !crtc_state->has_lobf ||
+ crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_alpm_enable_sink(intel_dp, crtc_state);
+ intel_alpm_configure(intel_dp, crtc_state);
+ }
+ }
}
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
@@ -403,6 +511,32 @@ out:
DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
+static int
+i915_edp_lobf_debug_get(void *data, u64 *val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ *val = intel_dp->alpm.lobf_disable_debug;
+
+ return 0;
+}
+
+static int
+i915_edp_lobf_debug_set(void *data, u64 val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ intel_dp->alpm.lobf_disable_debug = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_lobf_debug_fops,
+ i915_edp_lobf_debug_get, i915_edp_lobf_debug_set,
+ "%llu\n");
+
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -412,6 +546,55 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
+ debugfs_create_file("i915_edp_lobf_debug", 0644, root,
+ connector, &i915_edp_lobf_debug_fops);
+
debugfs_create_file("i915_edp_lobf_info", 0444, root,
connector, &i915_edp_lobf_info_fops);
}
+
+void intel_alpm_disable(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->alpm.transcoder;
+
+ if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
+ return;
+
+ mutex_lock(&intel_dp->alpm.lock);
+
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ drm_dbg_kms(display->drm, "Disabling ALPM\n");
+ mutex_unlock(&intel_dp->alpm.lock);
+}
+
+bool intel_alpm_get_error(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(display->drm, "Error reading ALPM status\n");
+ return true;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ drm_dbg_kms(display->drm, "ALPM lock timeout error\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index 8c409b10dce6..53599b464dea 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -12,16 +12,30 @@ struct intel_dp;
struct intel_crtc_state;
struct drm_connector_state;
struct intel_connector;
+struct intel_atomic_state;
+struct intel_crtc;
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
+void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_disable(struct intel_dp *intel_dp);
+bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
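[Illustrative note, not part of the patch] The header above exports new atomic hooks for ALPM (intel_alpm_pre_plane_update(), intel_alpm_post_plane_update(), intel_alpm_port_configure(), intel_alpm_enable_sink()). Below is a minimal sketch of how the pre/post hooks might be driven from the atomic commit path; the wrapper function names and exact call sites are assumptions for illustration and are not taken from this diff.

/*
 * Illustrative sketch only -- the call sites below are assumed, not part of
 * this patch. It only shows the intended shape: the pre hook runs before the
 * planes are reprogrammed (so an active LOBF configuration can be torn down
 * against the old state), the post hook runs once the new state is live.
 */
#include "intel_alpm.h"
#include "intel_display_types.h"

static void example_alpm_pre_plane_updates(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/* Walk the new CRTC states in this commit and run the pre hook. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_alpm_pre_plane_update(state, crtc);
}

static void example_alpm_post_plane_updates(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/* After the new state is active, re-enable sink ALPM / LOBF as needed. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_alpm_post_plane_update(state, crtc);
}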
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..348b1655435e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -26,23 +26,24 @@
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
- * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ * See intel_plane.c for the plane-specific atomic functionality.
*/
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
@@ -275,7 +274,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color = NULL;
crtc_state->dsb_commit = NULL;
crtc_state->use_dsb = false;
@@ -311,7 +310,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color);
drm_WARN_ON(crtc->dev, crtc_state->dsb_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index e506f6a87344..a5a7e2906ba8 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -14,7 +14,6 @@ struct drm_connector_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_device;
-struct drm_i915_private;
struct drm_property;
struct intel_atomic_state;
struct intel_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ce8a4319a63c..5bdaef38f13d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -27,9 +27,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
@@ -188,15 +188,17 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
* WA_14020863754: Implement Audio Workaround
* Corner case with Min Hblank Fix can cause audio hang
*/
-static bool needs_wa_14020863754(struct drm_i915_private *i915)
+static bool needs_wa_14020863754(struct intel_display *display)
{
- return (DISPLAY_VER(i915) == 20 || IS_BATTLEMAGE(i915));
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1401;
}
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -206,17 +208,17 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
- if (DISPLAY_VER(i915) < 12 && adjusted_mode->crtc_clock > 148500)
+ if (DISPLAY_VER(display) < 12 && adjusted_mode->crtc_clock > 148500)
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
i = 1;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
@@ -251,11 +253,11 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
/* ELD buffer size in dwords */
-static int g4x_eld_buffer_size(struct drm_i915_private *i915)
+static int g4x_eld_buffer_size(struct intel_display *display)
{
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
@@ -263,33 +265,33 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *eld = (u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
if ((tmp & G4X_ELD_VALID) == 0)
return;
- intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+ intel_de_rmw(display, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+ eld[i] = intel_de_read(display, G4X_HDMIW_HDMIEDID);
}
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID, 0);
intel_crtc_wait_for_next_vblank(crtc);
@@ -300,28 +302,28 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, eld[i]);
for (; i < eld_buffer_size; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, 0);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, 0);
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
0, G4X_ELD_VALID);
}
@@ -329,11 +331,11 @@ static void
hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Enable time stamps. Let HW calculate Maud/Naud values */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
AUD_CONFIG_UPPER_N_MASK |
@@ -347,8 +349,8 @@ static void
hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -356,7 +358,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -364,25 +366,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- drm_dbg_kms(&i915->drm, "using N %d\n", n);
+ drm_dbg_kms(display->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- drm_dbg_kms(&i915->drm, "using automatic N\n");
+ drm_dbg_kms(display->drm, "using automatic N\n");
}
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -395,18 +397,31 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
+static void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder trans = crtc_state->cpu_transcoder;
+
+ if (!HAS_DP20(display))
+ return;
+
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ enable && crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -415,26 +430,28 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
/* Disable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
- mutex_unlock(&i915->display.audio.mutex);
+ intel_audio_sdp_split_update(old_crtc_state, false);
+
+ mutex_unlock(&display->audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
@@ -446,13 +463,13 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
- cdclk = i915->display.cdclk.hw.cdclk;
+ cdclk = display->cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n",
h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk);
@@ -497,19 +514,19 @@ static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
- val = intel_de_read(i915, AUD_CONFIG_BE);
+ val = intel_de_read(display, AUD_CONFIG_BE);
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
@@ -536,66 +553,68 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
- intel_de_write(i915, AUD_CONFIG_BE, val);
+ intel_de_write(display, AUD_CONFIG_BE, val);
}
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
+ intel_audio_sdp_split_update(crtc_state, true);
+
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
/* Enable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
0, AUDIO_OUTPUT_ENABLE(cpu_transcoder));
intel_crtc_wait_for_next_vblank(crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ibx_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else if (HAS_PCH_CPT(i915)) {
+ } else if (HAS_PCH_CPT(display)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- } else if (HAS_PCH_IBX(i915)) {
+ } else if (HAS_PCH_IBX(display)) {
regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
regs->aud_config = IBX_AUD_CFG(pipe);
regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
@@ -607,21 +626,21 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -630,10 +649,10 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
@@ -643,32 +662,32 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_N_PROG_ENABLE |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK,
@@ -676,31 +695,21 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX :
audio_config_hdmi_pixel_clock(crtc_state)));
- mutex_unlock(&i915->display.audio.mutex);
-}
-
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder trans = crtc_state->cpu_transcoder;
-
- if (HAS_DP20(display))
- intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
- crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+ mutex_unlock(&display->audio.mutex);
}
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
mutex_unlock(&connector->eld_mutex);
@@ -729,8 +738,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -740,26 +749,27 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
drm_eld_size(crtc_state->eld));
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_enable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_enable(encoder,
crtc_state,
conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = encoder;
BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -770,7 +780,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
+ intel_lpe_audio_notify(display, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -788,8 +798,8 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -799,24 +809,25 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name);
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_disable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = NULL;
memset(audio_state->eld, 0, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -827,36 +838,36 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
+ intel_lpe_audio_notify(display, cpu_transcoder, port, NULL, 0, false);
}
static void intel_acomp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
if (audio_state->encoder)
memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->has_audio)
return;
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_get_config(encoder, crtc_state);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
@@ -879,17 +890,17 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
/**
* intel_audio_hooks_init - Set up chip specific audio hooks
- * @i915: device private
+ * @display: display device
*/
-void intel_audio_hooks_init(struct drm_i915_private *i915)
+void intel_audio_hooks_init(struct intel_display *display)
{
- if (IS_G4X(i915))
- i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
- HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
- i915->display.funcs.audio = &ibx_audio_funcs;
- else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
- i915->display.funcs.audio = &hsw_audio_funcs;
+ if (display->platform.g4x)
+ display->funcs.audio = &g4x_audio_funcs;
+ else if (display->platform.valleyview || display->platform.cherryview ||
+ HAS_PCH_CPT(display) || HAS_PCH_IBX(display))
+ display->funcs.audio = &ibx_audio_funcs;
+ else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
+ display->funcs.audio = &hsw_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -897,10 +908,10 @@ struct aud_ts_cdclk_m_n {
u16 n;
};
-void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_pre(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
- intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
+ if (DISPLAY_VER(display) >= 13)
+ intel_de_rmw(display, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
}
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
@@ -909,16 +920,18 @@ static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n
aud_ts->n = cdclk * aud_ts->m / 24000;
}
-void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_post(struct intel_display *display)
{
struct aud_ts_cdclk_m_n aud_ts;
- if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
+ if (DISPLAY_VER(display) >= 13) {
+ get_aud_ts_cdclk_m_n(display->cdclk.hw.ref,
+ display->cdclk.hw.cdclk, &aud_ts);
- intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
- intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
- drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_N, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
+ drm_dbg_kms(display->drm, "aud_ts_cdclk set to M=%u, N=%u\n",
+ aud_ts.m, aud_ts.n);
}
}
@@ -938,12 +951,12 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+ intel_cdclk_force_min_cdclk(cdclk_state, enable ? 2 * 96000 : 0);
return drm_atomic_commit(&state->base);
}
-static void glk_force_audio_cdclk(struct drm_i915_private *i915,
+static void glk_force_audio_cdclk(struct intel_display *display,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
@@ -951,13 +964,13 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915,
struct intel_crtc *crtc;
int ret;
- crtc = intel_first_crtc(i915);
+ crtc = intel_first_crtc(display);
if (!crtc)
return;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&i915->drm);
- if (drm_WARN_ON(&i915->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
state->acquire_ctx = &ctx;
@@ -972,7 +985,7 @@ retry:
goto retry;
}
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
drm_atomic_state_put(state);
@@ -983,7 +996,6 @@ retry:
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int min_cdclk = 0;
if (!crtc_state->has_audio)
@@ -1000,7 +1012,7 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) == 10) {
/* Display WA #1145: glk */
min_cdclk = max(min_cdclk, 316800);
- } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) == 9 || display->platform.broadwell) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(min_cdclk, 432000);
}
@@ -1020,99 +1032,95 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
* 270 | 320 or higher
* 162 | 200 or higher"
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
intel_crtc_has_dp_encoder(crtc_state))
min_cdclk = max(min_cdclk, crtc_state->port_clock);
return min_cdclk;
}
-static unsigned long i915_audio_component_get_power(struct device *kdev)
+static unsigned long intel_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (i915->display.audio.power_refcount++ == 0) {
- if (DISPLAY_VER(i915) >= 9) {
- intel_de_write(i915, AUD_FREQ_CNTRL,
- i915->display.audio.freq_cntrl);
- drm_dbg_kms(&i915->drm,
+ if (display->audio.power_refcount++ == 0) {
+ if (DISPLAY_VER(display) >= 9) {
+ intel_de_write(display, AUD_FREQ_CNTRL,
+ display->audio.freq_cntrl);
+ drm_dbg_kms(display->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- i915->display.audio.freq_cntrl);
+ display->audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, true);
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, true);
- if (DISPLAY_VER(i915) >= 10)
- intel_de_rmw(i915, AUD_PIN_BUF_CTL,
+ if (DISPLAY_VER(display) >= 10)
+ intel_de_rmw(display, AUD_PIN_BUF_CTL,
0, AUD_PIN_BUF_ENABLE);
}
return (unsigned long)wakeref;
}
-static void i915_audio_component_put_power(struct device *kdev,
- unsigned long cookie)
+static void intel_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref = (intel_wakeref_t)cookie;
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--i915->display.audio.power_refcount == 0)
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, false);
+ if (--display->audio.power_refcount == 0)
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, false);
- intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
}
-static void i915_audio_component_codec_wake_override(struct device *kdev,
- bool enable)
+static void intel_audio_component_codec_wake_override(struct device *kdev,
+ bool enable)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
- if (DISPLAY_VER(i915) < 9)
+ if (DISPLAY_VER(display) < 9)
return;
- cookie = i915_audio_component_get_power(kdev);
+ cookie = intel_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
SKL_AUD_CODEC_WAKE_SIGNAL, 0);
usleep_range(1000, 1500);
if (enable) {
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
0, SKL_AUD_CODEC_WAKE_SIGNAL);
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev, cookie);
+ intel_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
-static int i915_audio_component_get_cdclk_freq(struct device *kdev)
+static int intel_audio_component_get_cdclk_freq(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
+ if (drm_WARN_ON_ONCE(display->drm, !HAS_DDI(display)))
return -ENODEV;
- return i915->display.cdclk.hw.cdclk;
+ return display->cdclk.hw.cdclk;
}
/*
@@ -1124,7 +1132,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
* will get the right intel_encoder with port matched
* Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
-static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+static struct intel_audio_state *find_audio_state(struct intel_display *display,
int port, int cpu_transcoder)
{
/* MST */
@@ -1132,11 +1140,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- if (drm_WARN_ON(&i915->drm,
- cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
+ if (drm_WARN_ON(display->drm,
+ cpu_transcoder >= ARRAY_SIZE(display->audio.state)))
return NULL;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1148,11 +1156,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
if (cpu_transcoder > 0)
return NULL;
- for_each_cpu_transcoder(i915, cpu_transcoder) {
+ for_each_cpu_transcoder(display, cpu_transcoder) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1163,27 +1171,27 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
return NULL;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int cpu_transcoder, int rate)
+static int intel_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int cpu_transcoder, int rate)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct i915_audio_component *acomp = display->audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
int err = 0;
- if (!HAS_DDI(i915))
+ if (!HAS_DDI(display))
return 0;
- cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&i915->display.audio.mutex);
+ cookie = intel_audio_component_get_power(kdev);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1200,26 +1208,26 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&i915->display.audio.mutex);
- i915_audio_component_put_power(kdev, cookie);
+ mutex_unlock(&display->audio.mutex);
+ intel_audio_component_put_power(kdev, cookie);
return err;
}
-static int i915_audio_component_get_eld(struct device *kdev, int port,
- int cpu_transcoder, bool *enabled,
- unsigned char *buf, int max_bytes)
+static int intel_audio_component_get_eld(struct device *kdev, int port,
+ int cpu_transcoder, bool *enabled,
+ unsigned char *buf, int max_bytes)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
- mutex_unlock(&i915->display.audio.mutex);
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
+ mutex_unlock(&display->audio.mutex);
return -EINVAL;
}
@@ -1231,71 +1239,70 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
return ret;
}
-static const struct drm_audio_component_ops i915_audio_component_ops = {
- .owner = THIS_MODULE,
- .get_power = i915_audio_component_get_power,
- .put_power = i915_audio_component_put_power,
- .codec_wake_override = i915_audio_component_codec_wake_override,
- .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
- .sync_audio_rate = i915_audio_component_sync_audio_rate,
- .get_eld = i915_audio_component_get_eld,
+static const struct drm_audio_component_ops intel_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = intel_audio_component_get_power,
+ .put_power = intel_audio_component_put_power,
+ .codec_wake_override = intel_audio_component_codec_wake_override,
+ .get_cdclk_freq = intel_audio_component_get_cdclk_freq,
+ .sync_audio_rate = intel_audio_component_sync_audio_rate,
+ .get_eld = intel_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static int intel_audio_component_bind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
int i;
- if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(display->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(&i915->drm);
- acomp->base.ops = &i915_audio_component_ops;
+ drm_modeset_lock_all(display->drm);
+ acomp->base.ops = &intel_audio_component_ops;
acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- i915->display.audio.component = acomp;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = acomp;
+ drm_modeset_unlock_all(display->drm);
return 0;
}
-static void i915_audio_component_unbind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static void intel_audio_component_unbind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- drm_modeset_lock_all(&i915->drm);
+ drm_modeset_lock_all(display->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- i915->display.audio.component = NULL;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = NULL;
+ drm_modeset_unlock_all(display->drm);
device_link_remove(hda_kdev, drv_kdev);
- if (i915->display.audio.power_refcount)
- drm_err(&i915->drm, "audio power refcount %d after unbind\n",
- i915->display.audio.power_refcount);
+ if (display->audio.power_refcount)
+ drm_err(display->drm,
+ "audio power refcount %d after unbind\n",
+ display->audio.power_refcount);
}
-static const struct component_ops i915_audio_component_bind_ops = {
- .bind = i915_audio_component_bind,
- .unbind = i915_audio_component_unbind,
+static const struct component_ops intel_audio_component_bind_ops = {
+ .bind = intel_audio_component_bind,
+ .unbind = intel_audio_component_unbind,
};
#define AUD_FREQ_TMODE_SHIFT 14
@@ -1308,8 +1315,8 @@ static const struct component_ops i915_audio_component_bind_ops = {
#define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M)
/**
- * i915_audio_component_init - initialize and register the audio component
- * @i915: i915 device instance
+ * intel_audio_component_init - initialize and register the audio component
+ * @display: display device
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
@@ -1323,93 +1330,93 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-static void i915_audio_component_init(struct drm_i915_private *i915)
+static void intel_audio_component_init(struct intel_display *display)
{
u32 aud_freq, aud_freq_init;
- if (DISPLAY_VER(i915) >= 9) {
- aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
+ if (DISPLAY_VER(display) >= 9) {
+ aud_freq_init = intel_de_read(display, AUD_FREQ_CNTRL);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
aud_freq = AUD_FREQ_GEN12;
else
aud_freq = aud_freq_init;
/* use BIOS provided value for TGL and RKL unless it is a known bad value */
- if ((IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) &&
+ if ((display->platform.tigerlake || display->platform.rocketlake) &&
aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
- drm_dbg_kms(&i915->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ drm_dbg_kms(display->drm,
+ "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- i915->display.audio.freq_cntrl = aud_freq;
+ display->audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
- intel_audio_cdclk_change_post(i915);
+ intel_audio_cdclk_change_post(display);
}
-static void i915_audio_component_register(struct drm_i915_private *i915)
+static void intel_audio_component_register(struct intel_display *display)
{
int ret;
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
+ ret = component_add_typed(display->drm->dev,
+ &intel_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
- i915->display.audio.component_registered = true;
+ display->audio.component_registered = true;
}
/**
- * i915_audio_component_cleanup - deregister the audio component
- * @i915: i915 device instance
+ * intel_audio_component_cleanup - deregister the audio component
+ * @display: display device
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-static void i915_audio_component_cleanup(struct drm_i915_private *i915)
+static void intel_audio_component_cleanup(struct intel_display *display)
{
- if (!i915->display.audio.component_registered)
+ if (!display->audio.component_registered)
return;
- component_del(i915->drm.dev, &i915_audio_component_bind_ops);
- i915->display.audio.component_registered = false;
+ component_del(display->drm->dev, &intel_audio_component_bind_ops);
+ display->audio.component_registered = false;
}
/**
* intel_audio_init() - Initialize the audio driver either using
* component framework or using lpe audio bridge
- * @i915: the i915 drm device private data
+ * @display: display device
*
*/
-void intel_audio_init(struct drm_i915_private *i915)
+void intel_audio_init(struct intel_display *display)
{
- if (intel_lpe_audio_init(i915) < 0)
- i915_audio_component_init(i915);
+ if (intel_lpe_audio_init(display) < 0)
+ intel_audio_component_init(display);
}
-void intel_audio_register(struct drm_i915_private *i915)
+void intel_audio_register(struct intel_display *display)
{
- if (!i915->display.audio.lpe.platdev)
- i915_audio_component_register(i915);
+ if (!display->audio.lpe.platdev)
+ intel_audio_component_register(display);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
- * @i915: the i915 drm device private data
- *
+ * @display: display device
*/
-void intel_audio_deinit(struct drm_i915_private *i915)
+void intel_audio_deinit(struct intel_display *display)
{
- if (i915->display.audio.lpe.platdev != NULL)
- intel_lpe_audio_teardown(i915);
+ if (display->audio.lpe.platdev)
+ intel_lpe_audio_teardown(display);
else
- i915_audio_component_cleanup(i915);
+ intel_audio_component_cleanup(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 1bafc155434a..42cf886f3d24 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -9,11 +9,11 @@
#include <linux/types.h>
struct drm_connector_state;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
-void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+void intel_audio_hooks_init(struct intel_display *display);
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
@@ -25,12 +25,11 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state);
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
-void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+void intel_audio_cdclk_change_pre(struct intel_display *display);
+void intel_audio_cdclk_change_post(struct intel_display *display);
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_register(struct drm_i915_private *i915);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
+void intel_audio_init(struct intel_display *display);
+void intel_audio_register(struct intel_display *display);
+void intel_audio_deinit(struct intel_display *display);
#endif /* __INTEL_AUDIO_H__ */
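
The header change above works because intel_audio.h only ever passes these types by pointer, so an opaque forward declaration (struct intel_display;) is enough and no additional include is needed. A minimal standalone illustration of the same C idiom, with invented names:

/* --- hypothetical widget.h: a forward declaration is all the header needs --- */
struct widget;				/* opaque type: size/layout unknown here */
void widget_frob(struct widget *w);	/* pointer parameters compile fine */

/* --- hypothetical widget.c: the full definition lives where it is dereferenced --- */
struct widget {
	int frob_count;
};

void widget_frob(struct widget *w)
{
	w->frob_count++;		/* dereferencing requires the definition */
}

Keeping headers to forward declarations shortens include chains and avoids rebuilding every user of intel_audio.h when unrelated parts of the private structures change.
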
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3f81a726cc7d..a68fdbd2acb9 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -7,15 +7,20 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
-
#include <acpi/video.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
@@ -40,8 +45,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
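
scale()'s body is outside this hunk, but its arguments and the callers below indicate a linear remap of val from [source_min, source_max] onto [target_min, target_max]; under that assumption, returning target_min when source_min >= source_max also sidesteps the zero-width source span that a linear remap would divide by. A self-contained sketch of such a remap (illustrative, not the i915 implementation):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_sketch(uint32_t val,
			     uint32_t src_min, uint32_t src_max,
			     uint32_t dst_min, uint32_t dst_max)
{
	uint64_t out;

	if (src_min >= src_max || dst_min > dst_max)
		return dst_min;			/* mirrors the early return above */

	/* defensive clamp, as in the original */
	if (val < src_min)
		val = src_min;
	if (val > src_max)
		val = src_max;

	/* 64-bit intermediate so the multiply cannot overflow */
	out = (uint64_t)(val - src_min) * (dst_max - dst_min);
	out /= src_max - src_min;

	return (uint32_t)(dst_min + out);
}

int main(void)
{
	/* e.g. user brightness 50 on a 0..100 scale mapped to a 200..1000 PWM range */
	printf("%u\n", scale_sketch(50, 0, 100, 200, 1000));	/* prints 600 */
	return 0;
}
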
@@ -102,20 +108,20 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
val = scale(val, panel->backlight.min, panel->backlight.max,
@@ -143,32 +149,33 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return intel_de_read(i915, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return intel_de_read(i915, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 val;
- val = intel_de_read(i915, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(i915) < 4)
+ val = intel_de_read(display, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(display) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 lbpc;
- pci_read_config_byte(to_pci_dev(i915->drm.dev), LBPC, &lbpc);
+ pci_read_config_byte(pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -177,20 +184,20 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- return intel_de_read(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(display, BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
@@ -205,69 +212,71 @@ static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe un
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 val;
- val = intel_de_read(i915, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, val | level);
+ val = intel_de_read(display, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 tmp;
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(display, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
+ return;
if (panel->backlight.combination_mode) {
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 lbpc;
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(to_pci_dev(i915->drm.dev), LBPC, lbpc);
+ pci_write_config_byte(pdev, LBPC, lbpc);
}
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = intel_de_read(i915, BLC_PWM_CTL) & ~mask;
- intel_de_write(i915, BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(display, BLC_PWM_CTL) & ~mask;
+ intel_de_write(display, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(display, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- intel_de_write(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(display, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
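
In i9xx_set_backlight() above, combination mode splits the requested level between the legacy LBPC byte in PCI config space and the PWM duty-cycle field: lbpc is the level's fraction of pwm_level_max scaled into 1..255, and the duty cycle is the level divided back down by that byte (i9xx_get_backlight() multiplies them back together when reading). A worked example of that split with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int pwm_level_max = 1000;	/* assumed hardware maximum */
	unsigned int level = 500;		/* requested level: half scale */
	unsigned int lbpc;			/* a u8 in the driver */

	/* same arithmetic as the diff */
	lbpc = level * 0xfe / pwm_level_max + 1;	/* 500 * 254 / 1000 + 1 = 128 */
	level /= lbpc;					/* 500 / 128 = 3 */

	printf("LBPC byte = %u, PWM duty field = %u\n", lbpc, level);
	printf("read back: %u\n", level * lbpc);	/* 3 * 128 = 384 */
	return 0;
}

The read-back value (384 against a request of 500) shows the remainder lost to the integer division; the duty-cycle field only carries what is left once most of the level has been expressed through LBPC.
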
@@ -282,10 +291,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
@@ -298,7 +307,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -311,9 +320,9 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
- drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -327,13 +336,13 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, level);
@@ -346,26 +355,26 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
connector->base.base.id, connector->base.name);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -375,48 +384,49 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_display *display = to_intel_display(connector);
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_rmw(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
BXT_BLC_PWM_ENABLE, 0);
if (panel->backlight.controller == 1)
- intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
+ intel_de_rmw(display, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_rmw(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
BXT_BLC_PWM_ENABLE, 0);
}
@@ -434,7 +444,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
@@ -446,61 +456,62 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ if (display->drm->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
connector->base.base.id, connector->base.name);
return;
}
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = BACKLIGHT_POWER_OFF;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915))
- intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ if (HAS_PCH_LPT(display))
+ intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
else
- intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ intel_de_rmw(display, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(display, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(display, BLC_PWM_PCH_CTL1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
@@ -510,63 +521,66 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(i915, BLC_PWM_CPU_CTL2);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(display, BLC_PWM_CPU_CTL2);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(display, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(display, BLC_PWM_PCH_CTL1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
- intel_de_write(i915, BLC_PWM_CTL, 0);
+ intel_de_write(display, BLC_PWM_CTL, 0);
}
freq = panel->backlight.pwm_level_max;
@@ -576,11 +590,11 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = freq << 17;
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm)
+ if (display->platform.pineview && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- intel_de_write(i915, BLC_PWM_CTL, ctl);
- intel_de_posting_read(i915, BLC_PWM_CTL);
+ intel_de_write(display, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(display, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -590,25 +604,26 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (DISPLAY_VER(i915) == 2)
- intel_de_write(i915, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ if (DISPLAY_VER(display) == 2)
+ intel_de_write(display, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(display, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_CTL2, ctl2);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.pwm_level_max;
@@ -616,16 +631,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- intel_de_write(i915, BLC_PWM_CTL, ctl);
+ intel_de_write(display, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(i915, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(i915, BLC_PWM_CTL2);
- intel_de_write(i915, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(display, BLC_PWM_CTL2);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_backlight_set_pwm_level(conn_state, level);
}
@@ -634,21 +649,22 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(display, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(display, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -656,47 +672,49 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(i915, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(display, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
+ val = intel_de_read(display, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] utility pin already enabled\n",
connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
- intel_de_write(i915, UTIL_PIN_CTL, val);
+ intel_de_write(display, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- intel_de_write(i915, UTIL_PIN_CTL,
+ intel_de_write(display, UTIL_PIN_CTL,
val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -705,9 +723,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -715,19 +733,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(display->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -736,9 +754,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -780,37 +798,37 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
- drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
- drm_dbg_kms(&i915->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(display->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -829,16 +847,16 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
if (!panel->backlight.present)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
- drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -846,18 +864,18 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
- drm_dbg_kms(&i915->drm, "updating intel_backlight, brightness=%d/%d\n",
+ drm_dbg_kms(display->drm, "updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector->base.state, bd->props.brightness,
bd->props.max_brightness);
@@ -878,7 +896,7 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
bd->props.power = BACKLIGHT_POWER_OFF;
}
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -886,20 +904,19 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(connector);
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
- drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector,
hw_level, bd->props.max_brightness);
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
}
return ret;
@@ -912,7 +929,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
int intel_backlight_device_register(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
struct backlight_device *bd;
@@ -928,7 +945,8 @@ int intel_backlight_device_register(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
if (!acpi_video_backlight_use_native()) {
- drm_info(&i915->drm, "Skipping intel_backlight registration\n");
+ drm_info(display->drm,
+ "Skipping intel_backlight registration\n");
return 0;
}
@@ -965,7 +983,8 @@ int intel_backlight_device_register(struct intel_connector *connector)
*/
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
- i915->drm.primary->index, connector->base.name);
+ display->drm->primary->index,
+ connector->base.name);
if (!name)
return -ENOMEM;
}
@@ -973,7 +992,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
ret = PTR_ERR(bd);
@@ -982,7 +1001,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = bd;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] backlight device %s registered\n",
connector->base.base.id, connector->base.name, name);
@@ -1009,9 +1028,9 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq),
pwm_freq_hz);
}
@@ -1048,7 +1067,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1057,7 +1076,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(i915))
+ if (HAS_PCH_LPT_H(display))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1071,9 +1090,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq),
pwm_freq_hz * 128);
}
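
The *_hz_to_pwm() helpers above all reduce to a reference clock divided by the requested PWM frequency and a fixed divider; pch_hz_to_pwm(), for example, is rawclk (converted from kHz to Hz by KHz()) / (pwm_freq_hz * 128), rounded to nearest. A worked example, assuming a 24 MHz raw clock and the 200 Hz fallback frequency that get_vbt_pwm_freq() further down in this diff applies when the VBT specifies none:

#include <stdio.h>

/* stand-in for the kernel's DIV_ROUND_CLOSEST() for unsigned operands */
#define DIV_ROUND_CLOSEST_U(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long rawclk_khz = 24000;	/* assumption: 24 MHz raw clock */
	unsigned long pwm_freq_hz = 200;	/* default when the VBT gives none */

	unsigned long max_count =
		DIV_ROUND_CLOSEST_U(rawclk_khz * 1000, pwm_freq_hz * 128);

	printf("PWM max count = %lu\n", max_count);	/* 24000000 / 25600 -> 938 */
	return 0;
}

get_backlight_max_vbt() uses this conversion to seed pwm_level_max when the PWM registers read back zero, which is what sets the granularity available for brightness steps at that frequency.
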
@@ -1087,13 +1106,13 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int clock;
- if (IS_PINEVIEW(i915))
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ if (display->platform.pineview)
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
else
- clock = KHz(i915->display.cdclk.hw.cdclk);
+ clock = KHz(display->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1105,13 +1124,13 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int clock;
- if (IS_G4X(i915))
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ if (display->platform.g4x)
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
else
- clock = KHz(i915->display.cdclk.hw.cdclk);
+ clock = KHz(display->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1123,17 +1142,17 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int mul, clock;
- if ((intel_de_read(i915, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(i915))
+ if ((intel_de_read(display, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (display->platform.cherryview)
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
mul = 128;
}
@@ -1142,16 +1161,16 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT defined backlight frequency %u Hz\n",
pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"default backlight frequency %u Hz\n",
pwm_freq_hz);
}
@@ -1161,20 +1180,20 @@ static u16 get_vbt_pwm_freq(struct intel_connector *connector)
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"backlight frequency conversion not supported\n");
return 0;
}
pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"backlight frequency conversion failed\n");
return 0;
}
@@ -1187,11 +1206,11 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1202,7 +1221,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
if (min != connector->panel.vbt.backlight.min_brightness) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
connector->panel.vbt.backlight.min_brightness, min);
}
@@ -1213,24 +1232,24 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(i915))
- alt = intel_de_read(i915, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ if (HAS_PCH_LPT(display))
+ alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = intel_de_read(i915, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(display, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1242,26 +1261,26 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(display) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
if (cpu_mode) {
val = pch_get_backlight(connector, unused);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, val);
- intel_de_write(i915, BLC_PWM_PCH_CTL1,
+ intel_de_write(display, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- intel_de_write(i915, BLC_PWM_CPU_CTL2,
+ intel_de_write(display, BLC_PWM_CPU_CTL2,
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1270,14 +1289,14 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(display, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1288,11 +1307,11 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1301,16 +1320,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
- if (DISPLAY_VER(i915) == 2 || IS_I915GM(i915) || IS_I945GM(i915))
+ if (DISPLAY_VER(display) == 2 || display->platform.i915gm || display->platform.i945gm)
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.pwm_level_max = ctl >> 17;
@@ -1334,7 +1353,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1343,15 +1362,15 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(display, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1367,7 +1386,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1376,17 +1395,17 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(display, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(display, VLV_BLC_PWM_CTL(pipe));
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1399,7 +1418,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
connector->base.base.id, connector->base.name, pipe_name(pipe));
@@ -1409,25 +1428,25 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- pwm_ctl = intel_de_read(i915,
+ pwm_ctl = intel_de_read(display,
BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
+ val = intel_de_read(display, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(display, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1439,7 +1458,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
@@ -1447,29 +1466,29 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
-static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
+static int cnp_num_backlight_controllers(struct intel_display *display)
{
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
}
-static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller)
+static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- if (controller < 0 || controller >= cnp_num_backlight_controllers(i915))
+ if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
- return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
+ return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
}
@@ -1477,7 +1496,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
@@ -1486,19 +1505,20 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
* controller. ICP+ can have two controllers, depending on pin muxing.
*/
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ if (!cnp_backlight_controller_is_valid(display, panel->backlight.controller)) {
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
panel->backlight.controller = 0;
}
- pwm_ctl = intel_de_read(i915,
+ pwm_ctl = intel_de_read(display,
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(display, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1510,7 +1530,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
@@ -1521,22 +1541,25 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int ext_pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
const char *desc;
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_pmic_backlight");
+ panel->backlight.pwm = pwm_get(display->drm->dev,
+ "pwm_pmic_backlight");
desc = "PMIC";
} else {
- panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_soc_backlight");
+ panel->backlight.pwm = pwm_get(display->drm->dev,
+ "pwm_soc_backlight");
desc = "SoC";
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
@@ -1554,7 +1577,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
@@ -1564,7 +1588,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
connector->base.base.id, connector->base.name, desc);
@@ -1630,17 +1654,17 @@ void intel_backlight_update(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
@@ -1791,30 +1815,30 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
{
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) {
- if (HAS_PCH_LPT(i915))
+ } else if (INTEL_PCH_TYPE(display) >= PCH_LPT_H) {
+ if (HAS_PCH_LPT(display))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (DISPLAY_VER(i915) == 4) {
+ } else if (DISPLAY_VER(display) == 4) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
@@ -1824,7 +1848,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e0e4e9b62d8d..4b41068e9e35 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -32,12 +32,15 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "soc/intel_rom.h"
-#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
@@ -1564,10 +1567,7 @@ parse_psr(struct intel_display *display,
panel->vbt.psr.full_link = psr_table->full_link;
panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
-
- /* Allowed VBT values goes from 0 to 15 */
- panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
- psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+ panel->vbt.psr.idle_frames = psr_table->idle_frames;
/*
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
@@ -1937,7 +1937,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display,
int index, len;
if (drm_WARN_ON(display->drm,
- !data || panel->vbt.dsi.seq_version != 1))
+ !data || panel->vbt.dsi.seq_version >= 3))
return 0;
/* index = 1 to skip sequence byte */
@@ -1960,7 +1960,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display,
}
/*
- * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * Some v1/v2 VBT MIPI sequences do the deassert in the init OTP sequence.
* The deassert must be done before calling intel_dsi_device_ready, so for
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
@@ -1971,9 +1971,9 @@ static void vlv_fixup_mipi_sequences(struct intel_display *display,
u8 *init_otp;
int len;
- /* Limit this to v1 vid-mode sequences */
+ /* Limit this to v1/v2 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
- panel->vbt.dsi.seq_version != 1)
+ panel->vbt.dsi.seq_version >= 3)
return;
/* Only do this if there are otp and assert seqs and no deassert seq */
@@ -2244,28 +2244,27 @@ static const u8 adlp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
return vbt_pin;
- } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(display) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
+ } else if (HAS_PCH_TGP(display) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -2479,6 +2478,25 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static u32 edp_rate_override_mask(int rate)
+{
+ switch (rate) {
+ case 2000000: return BDB_263_VBT_EDP_LINK_RATE_20;
+ case 1350000: return BDB_263_VBT_EDP_LINK_RATE_13_5;
+ case 1000000: return BDB_263_VBT_EDP_LINK_RATE_10;
+ case 810000: return BDB_263_VBT_EDP_LINK_RATE_8_1;
+ case 675000: return BDB_263_VBT_EDP_LINK_RATE_6_75;
+ case 540000: return BDB_263_VBT_EDP_LINK_RATE_5_4;
+ case 432000: return BDB_263_VBT_EDP_LINK_RATE_4_32;
+ case 324000: return BDB_263_VBT_EDP_LINK_RATE_3_24;
+ case 270000: return BDB_263_VBT_EDP_LINK_RATE_2_7;
+ case 243000: return BDB_263_VBT_EDP_LINK_RATE_2_43;
+ case 216000: return BDB_263_VBT_EDP_LINK_RATE_2_16;
+ case 162000: return BDB_263_VBT_EDP_LINK_RATE_1_62;
+ default: return 0;
+ }
+}
+
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->display->vbt.version < 216)
@@ -2498,6 +2516,19 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
return devdata->child.dp_max_lane_count + 1;
}
+bool
+intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate)
+{
+ if (!devdata || devdata->display->vbt.version < 263)
+ return false;
+
+ if (devdata->child.edp_data_rate_override == BDB_263_VBT_EDP_RATES_MASK)
+ return false;
+
+ return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2746,8 +2777,10 @@ static int child_device_expected_size(u16 version)
{
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
- if (version > 256)
+ if (version > 263)
return -ENOENT;
+ else if (version >= 263)
+ return 44;
else if (version >= 256)
return 40;
else if (version >= 216)
@@ -2864,8 +2897,6 @@ parse_general_definitions(struct intel_display *display)
static void
init_vbt_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
@@ -2882,7 +2913,7 @@ init_vbt_defaults(struct intel_display *display)
* clock for LVDS.
*/
display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
- !HAS_PCH_SPLIT(i915));
+ !HAS_PCH_SPLIT(display));
drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
display->vbt.lvds_ssc_freq);
}
@@ -2902,7 +2933,6 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
@@ -2912,13 +2942,13 @@ init_vbt_missing_defaults(struct intel_display *display)
for_each_port_masked(port, ports) {
struct intel_bios_encoder_data *devdata;
struct child_device_config *child;
- enum phy phy = intel_port_to_phy(i915, port);
+ enum phy phy = intel_port_to_phy(display, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
- if (intel_phy_is_tc(i915, phy))
+ if (intel_phy_is_tc(display, phy))
continue;
/* Create fake child device config */
@@ -3114,9 +3144,7 @@ err_free_rom:
static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display,
size_t *sizep)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3127,13 +3155,13 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* If the OpRegion does not have VBT, look in SPI flash
* through MMIO or PCI mapping
*/
- if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
+ if (!vbt && display->platform.dgfx)
+ with_intel_display_rpm(display)
+ vbt = oprom_get_vbt(display, intel_rom_spi(display->drm), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
+ with_intel_display_rpm(display)
+ vbt = oprom_get_vbt(display, intel_rom_pci(display->drm), sizep, "PCI ROM");
return vbt;
}
@@ -3746,8 +3774,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_vbt", 0444, display->drm->debugfs_root,
display, &intel_bios_vbt_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index f9841f0498c6..f9e438b2787b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -24,7 +24,7 @@
/*
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
* the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
+ * intel_vbt_data within struct intel_display.
*/
#ifndef _INTEL_BIOS_H_
@@ -50,180 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-/*
- * MIPI Sequence Block definitions
- *
- * Note the VBT spec has AssertReset / DeassertReset swapped from their
- * usual naming, we use the proper names here to avoid confusion when
- * reading the code.
- */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-#define MIPI_DSI_UNDEFINED_PANEL_ID 0
-#define MIPI_DSI_GENERIC_PANEL_ID 1
-
-struct mipi_config {
- u16 panel_id;
-
- /* General Params */
- u32 enable_dithering:1;
- u32 rsvd1:1;
- u32 is_bridge:1;
-
- u32 panel_arch_type:2;
- u32 is_cmd_mode:1;
-
-#define NON_BURST_SYNC_PULSE 0x1
-#define NON_BURST_SYNC_EVENTS 0x2
-#define BURST_MODE 0x3
- u32 video_transfer_mode:2;
-
- u32 cabc_supported:1;
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
- u32 pwm_blc:1;
-
- /* Bit 13:10 */
-#define PIXEL_FORMAT_RGB565 0x1
-#define PIXEL_FORMAT_RGB666 0x2
-#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
-#define PIXEL_FORMAT_RGB888 0x4
- u32 videomode_color_format:4;
-
- /* Bit 15:14 */
-#define ENABLE_ROTATION_0 0x0
-#define ENABLE_ROTATION_90 0x1
-#define ENABLE_ROTATION_180 0x2
-#define ENABLE_ROTATION_270 0x3
- u32 rotation:2;
- u32 bta_enabled:1;
- u32 rsvd2:15;
-
- /* 2 byte Port Description */
-#define DUAL_LINK_NOT_SUPPORTED 0
-#define DUAL_LINK_FRONT_BACK 1
-#define DUAL_LINK_PIXEL_ALT 2
- u16 dual_link:2;
- u16 lane_cnt:2;
- u16 pixel_overlap:3;
- u16 rgb_flip:1;
-#define DL_DCS_PORT_A 0x00
-#define DL_DCS_PORT_C 0x01
-#define DL_DCS_PORT_A_AND_C 0x02
- u16 dl_dcs_cabc_ports:2;
- u16 dl_dcs_backlight_ports:2;
- u16 rsvd3:4;
-
- u16 rsvd4;
-
- u8 rsvd5;
- u32 target_burst_mode_freq;
- u32 dsi_ddr_clk;
- u32 bridge_ref_clk;
-
-#define BYTE_CLK_SEL_20MHZ 0
-#define BYTE_CLK_SEL_10MHZ 1
-#define BYTE_CLK_SEL_5MHZ 2
- u8 byte_clk_sel:2;
-
- u8 rsvd6:6;
-
- /* DPHY Flags */
- u16 dphy_param_valid:1;
- u16 eot_pkt_disabled:1;
- u16 enable_clk_stop:1;
- u16 rsvd7:13;
-
- u32 hs_tx_timeout;
- u32 lp_rx_timeout;
- u32 turn_around_timeout;
- u32 device_reset_timer;
- u32 master_init_timer;
- u32 dbi_bw_timer;
- u32 lp_byte_clk_val;
-
- /* 4 byte Dphy Params */
- u32 prepare_cnt:6;
- u32 rsvd8:2;
- u32 clk_zero_cnt:8;
- u32 trail_cnt:5;
- u32 rsvd9:3;
- u32 exit_zero_cnt:6;
- u32 rsvd10:2;
-
- u32 clk_lane_switch_cnt;
- u32 hl_switch_cnt;
-
- u32 rsvd11[6];
-
- /* timings based on dphy spec */
- u8 tclk_miss;
- u8 tclk_post;
- u8 rsvd12;
- u8 tclk_pre;
- u8 tclk_prepare;
- u8 tclk_settle;
- u8 tclk_term_enable;
- u8 tclk_trail;
- u16 tclk_prepare_clkzero;
- u8 rsvd13;
- u8 td_term_enable;
- u8 teot;
- u8 ths_exit;
- u8 ths_prepare;
- u16 ths_prepare_hszero;
- u8 rsvd14;
- u8 ths_settle;
- u8 ths_skip;
- u8 ths_trail;
- u8 tinit;
- u8 tlpx;
- u8 rsvd15[3];
-
- /* GPIOs */
- u8 panel_enable;
- u8 bl_enable;
- u8 pwm_enable;
- u8 reset_r_n;
- u8 pwr_down_r;
- u8 stdby_r_n;
-
-} __packed;
-
-/* all delays have a unit of 100us */
-struct mipi_pps_data {
- u16 panel_on_delay;
- u16 bl_enable_delay;
- u16 bl_disable_delay;
- u16 panel_off_delay;
- u16 panel_power_cycle_delay;
-} __packed;
-
void intel_bios_init(struct intel_display *display);
void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
@@ -259,6 +85,8 @@ bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate);
enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index fbd16d7b58d9..f3687eb63467 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_panic.h>
+
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -27,11 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return i915_gem_object_is_protected(to_intel_bo(obj));
}
-void intel_bo_flush_if_display(struct drm_gem_object *obj)
-{
- i915_gem_object_flush_if_display(to_intel_bo(obj));
-}
-
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return i915_gem_fb_mmap(to_intel_bo(obj), vma);
@@ -42,15 +39,40 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i
return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
}
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj)
{
- return i915_gem_object_get_frontbuffer(to_intel_bo(obj));
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_get(obj);
+ if (!front)
+ return NULL;
+
+ return &front->base;
}
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front)
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
- return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front);
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_frontbuffer_ref(front);
+}
+
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ return i915_gem_object_frontbuffer_put(front);
+}
+
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_flush_if_display(front->obj);
}
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index ea7a2253aaa5..fc05f680dc76 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
struct drm_gem_object;
+struct drm_scanout_buffer;
+struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -14,13 +16,13 @@ bool intel_bo_is_tiled(struct drm_gem_object *obj);
bool intel_bo_is_userptr(struct drm_gem_object *obj);
bool intel_bo_is_shmem(struct drm_gem_object *obj);
bool intel_bo_is_protected(struct drm_gem_object *obj);
-void intel_bo_flush_if_display(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj);
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front);
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj);
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 08e8a67ca74c..1f6461be50ef 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -4,18 +4,51 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
+
+#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
-#include "intel_atomic.h"
#include "intel_bw.h"
-#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "skl_watermark.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_uncore.h"
+#include "skl_watermark.h"
+
+struct intel_bw_state {
+ struct intel_global_state base;
+
+ /*
+ * Contains a bit mask, used to determine, whether correspondent
+ * pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /*
+	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+	 * the selected QGV point as the parameter, in multiples of 100 MB/s.
+ */
+ u16 qgv_point_peakbw;
+
+ /*
+	 * Current QGV points mask, which restricts
+	 * some particular SAGV states; not to be confused
+	 * with pipe_sagv_mask.
+ */
+ u16 qgv_points_mask;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -39,14 +72,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +88,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,7 +108,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
@@ -82,14 +116,14 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,15 +136,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +156,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +176,50 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
- u32 points_mask)
+static int icl_pcode_restrict_qgv_points(struct intel_display *display,
+ u32 points_mask)
{
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +235,29 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
+ const struct dram_info *dram_info,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -244,13 +279,14 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = 4;
break;
case INTEL_DRAM_GDDR:
+ case INTEL_DRAM_GDDR_ECC:
qi->channel_width = 32;
break;
default:
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -265,7 +301,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -284,40 +320,40 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
- "PSF GV %d: CLK=%d \n",
+ drm_dbg_kms(display->drm,
+ "PSF GV %d: CLK=%d\n",
i, qi->psf_points[i].clk);
}
@@ -398,20 +434,42 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
+ .derating = 45,
+ .deprogbwlimit = 53,
+ /* Other values not used by simplified algorithm */
+};
+
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info xe3lpd_3002_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 22, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -422,7 +480,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -449,7 +507,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -460,44 +518,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -514,7 +573,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -522,7 +581,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -554,7 +613,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -565,7 +624,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -577,17 +636,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -598,7 +657,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -606,20 +665,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- int num_channels = i915->dram_info.num_channels;
+ int num_channels = dram_info->num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, dram_info, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -631,33 +691,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -667,9 +727,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -685,7 +745,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -695,9 +755,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -713,151 +773,113 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
- return;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
-}
+ if (!HAS_DISPLAY(display))
+ return;
-static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
-{
/*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
+ * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
+ * enabled that would impact display bandwidth. However, so far there
+ * are no instructions in Bspec on how to handle that case. Let's
+ * complain if we ever find such a scenario.
*/
- return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
-}
-
-static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- unsigned int data_rate = 0;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->data_rate[plane_id];
-
- if (DISPLAY_VER(i915) < 11)
- data_rate += crtc_state->data_rate_y[plane_id];
+ if (DISPLAY_VER(display) >= 35)
+ drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);
+
+ if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) == 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
+ } else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
+ if (dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
+ else
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
+ } else if (DISPLAY_VER(display) >= 14) {
+ tgl_get_bw_info(display, dram_info, &mtl_sa_info);
+ } else if (display->platform.dg2) {
+ dg2_get_bw_info(display);
+ } else if (display->platform.alderlake_p) {
+ tgl_get_bw_info(display, dram_info, &adlp_sa_info);
+ } else if (display->platform.alderlake_s) {
+ tgl_get_bw_info(display, dram_info, &adls_sa_info);
+ } else if (display->platform.rocketlake) {
+ tgl_get_bw_info(display, dram_info, &rkl_sa_info);
+ } else if (DISPLAY_VER(display) == 12) {
+ tgl_get_bw_info(display, dram_info, &tgl_sa_info);
+ } else if (DISPLAY_VER(display) == 11) {
+ icl_get_bw_info(display, dram_info, &icl_sa_info);
}
-
- return data_rate;
}
-/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
- return 0;
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
-}
-
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
- bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
-
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
-}
-
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
}
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_bw_state, base);
+}
+
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -865,10 +887,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -876,27 +898,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -915,23 +937,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -944,29 +966,93 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
+void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
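+
+/*
+ * Illustrative sequence (example values, not part of the change): assume the
+ * old state masked QGV points 0-1 (0x3) and the new state masks only point 2
+ * (0x4):
+ *
+ *   icl_sagv_pre_plane_update()  -> icl_pcode_restrict_qgv_points(display, 0x7)
+ *   ... plane/watermark update ...
+ *   icl_sagv_post_plane_update() -> icl_pcode_restrict_qgv_points(display, 0x4)
+ *
+ * The union of both masks is applied before the update and only the new mask
+ * is kept afterwards, so a QGV point is never unmasked while it might still
+ * be needed.
+ */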
+
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -980,9 +1066,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -992,27 +1078,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1020,7 +1106,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1031,14 +1117,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1049,22 +1135,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1075,14 +1161,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1093,9 +1179,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1103,7 +1189,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1119,263 +1205,135 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
- const struct intel_bw_state *old_bw_state,
- const struct intel_bw_state *new_bw_state)
-{
- enum pipe pipe;
-
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
- &old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
- &new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
-
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
- return true;
- }
-
- return false;
-}
-
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
- struct intel_crtc *crtc,
- enum plane_id plane_id,
- const struct skl_ddb_entry *ddb,
- unsigned int data_rate)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
- enum dbuf_slice slice;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
- }
-}
-
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state)
+static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- enum plane_id plane_id;
-
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
- if (!crtc_state->hw.active)
- return;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ unsigned int old_data_rate =
+ intel_crtc_bw_data_rate(old_crtc_state);
+ unsigned int new_data_rate =
+ intel_crtc_bw_data_rate(new_crtc_state);
+ unsigned int old_active_planes =
+ intel_crtc_bw_num_active_planes(old_crtc_state);
+ unsigned int new_active_planes =
+ intel_crtc_bw_num_active_planes(new_crtc_state);
+ struct intel_bw_state *new_bw_state;
- for_each_plane_id_on_crtc(crtc, plane_id) {
/*
- * We assume cursors are small enough
- * to not cause bandwidth problems.
+ * Avoid locking the bw state when
+ * nothing significant has changed.
*/
- if (plane_id == PLANE_CURSOR)
+ if (old_data_rate == new_data_rate &&
+ old_active_planes == new_active_planes)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb[plane_id],
- crtc_state->data_rate[plane_id]);
-
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb_y[plane_id],
- crtc_state->data_rate[plane_id]);
- }
-}
-
-/* "Maximum Data Buffer Bandwidth" */
-static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
- const struct intel_bw_state *bw_state)
-{
- unsigned int total_max_bw = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(i915, slice) {
- int num_active_planes = 0;
- unsigned int max_bw = 0;
- enum pipe pipe;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
-
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
- }
- max_bw *= num_active_planes;
-
- total_max_bw = max(total_max_bw, max_bw);
- }
-
- return DIV_ROUND_UP(total_max_bw, 64);
-}
-
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
- const struct intel_bw_state *bw_state)
-{
- enum pipe pipe;
- int min_cdclk;
-
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
-
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
-
- return min_cdclk;
-}
-
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
- int old_min_cdclk, new_min_cdclk;
- struct intel_crtc *crtc;
- int i;
-
- if (DISPLAY_VER(dev_priv) < 9)
- return 0;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
- old_bw_state = intel_atomic_get_old_bw_state(state);
+ new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+ new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+ *changed = true;
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] data rate %u num active planes %u\n",
+ crtc->base.base.id, crtc->base.name,
+ new_bw_state->data_rate[crtc->pipe],
+ new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!old_bw_state)
- return 0;
-
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
+ return 0;
+}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+ int ret;
- /*
- * No need to check against the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= old_min_cdclk)
+ if (!intel_any_crtc_active_changed(state))
return 0;
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
- return 0;
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
- drm_dbg_kms(&dev_priv->drm,
- "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, cdclk_state->bw_min_cdclk);
- *need_cdclk_calc = true;
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
return 0;
}
-static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
struct intel_crtc *crtc;
- int i;
+ int ret, i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- unsigned int old_data_rate =
- intel_bw_crtc_data_rate(old_crtc_state);
- unsigned int new_data_rate =
- intel_bw_crtc_data_rate(new_crtc_state);
- unsigned int old_active_planes =
- intel_bw_crtc_num_active_planes(old_crtc_state);
- unsigned int new_active_planes =
- intel_bw_crtc_num_active_planes(new_crtc_state);
- struct intel_bw_state *new_bw_state;
-
- /*
- * Avoid locking the bw state when
- * nothing significant has changed.
- */
- if (old_data_rate == new_data_rate &&
- old_active_planes == new_active_planes)
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
continue;
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
- new_bw_state->data_rate[crtc->pipe] = new_data_rate;
- new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+ old_bw_state = intel_atomic_get_old_bw_state(state);
- *changed = true;
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] data rate %u num active planes %u\n",
- crtc->base.base.id, crtc->base.name,
- new_bw_state->data_rate[crtc->pipe],
- new_bw_state->num_active_planes[crtc->pipe]);
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_bw_can_enable_sagv(display, new_bw_state) !=
+ intel_bw_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
}
return 0;
@@ -1383,14 +1341,25 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1401,9 +1370,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_bw_can_enable_sagv(display, old_bw_state) !=
+ intel_bw_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1413,15 +1381,72 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
+static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_crtc_bw_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_crtc_bw_num_active_planes(crtc_state);
+
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+void intel_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.active)
+ bw_state->active_pipes |= BIT(pipe);
+
+ if (DISPLAY_VER(display) >= 11)
+ intel_bw_crtc_update(bw_state, crtc_state);
+
+ /* initially SAGV has been forced off */
+ bw_state->pipe_sagv_reject |= BIT(pipe);
+ }
+}
+
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
@@ -1445,7 +1470,7 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
struct intel_bw_state *state;
@@ -1453,15 +1478,44 @@ int intel_bw_init(struct drm_i915_private *i915)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
+ intel_atomic_global_obj_init(display, &display->bw.obj,
&state->base, &intel_bw_funcs);
/*
* Limit this only if we have SAGV. And for Display version 14 onwards
* sagv is handled though pmdemand requests
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
+
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *new_bw_state, *old_bw_state;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (new_bw_state &&
+ new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
+ return true;
+
+ return false;
+}
+
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(display) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
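+
+/*
+ * Example (illustrative values): on display versions before 11, active_pipes
+ * = 0x5 (pipes A and C) is not a power of two, so SAGV is rejected regardless
+ * of pipe_sagv_reject; with a single active pipe (e.g. 0x2) the decision
+ * falls back to pipe_sagv_reject == 0.
+ */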
+
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
+{
+ return bw_state->qgv_point_peakbw;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 161813cca473..99b447388245 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,58 +8,14 @@
#include <drm/drm_atomic.h>
-#include "intel_display_limits.h"
-#include "intel_display_power.h"
-#include "intel_global_state.h"
-
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_bw_state;
+struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
+struct intel_global_state;
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
-struct intel_bw_state {
- struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
-
- /*
- * Contains a bit mask, used to determine, whether correspondent
- * pipe allows SAGV or not.
- */
- u8 pipe_sagv_reject;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /*
- * From MTL onwards, to lock a QGV point, punit expects the peak BW of
- * the selected QGV point as the parameter in multiples of 100MB/s
- */
- u16 qgv_point_peakbw;
-
- /*
- * Current QGV points mask, which restricts
- * some particular SAGV states, not to confuse
- * with pipe_sagv_mask.
- */
- u16 qgv_points_mask;
-
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
- unsigned int data_rate[I915_MAX_PIPES];
- u8 num_active_planes[I915_MAX_PIPES];
-};
-
-#define to_intel_bw_state(global_state) \
- container_of_const((global_state), struct intel_bw_state, base)
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state);
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
@@ -70,16 +26,17 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
int intel_bw_atomic_check(struct intel_atomic_state *state);
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
- u32 points_mask);
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
- const struct intel_bw_state *bw_state);
+void intel_bw_update_hw_state(struct intel_display *display);
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state);
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state);
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state);
+void icl_sagv_post_plane_update(struct intel_atomic_state *state);
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
new file mode 100644
index 000000000000..95339b496f24
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+
+#define MAX_PIXELS_FOR_3_TAP_FILTER (1920 * 1080)
+#define MAX_PIXELS_FOR_5_TAP_FILTER (3840 * 2160)
+
+#define FILTER_COEFF_0_125 125
+#define FILTER_COEFF_0_25 250
+#define FILTER_COEFF_0_5 500
+#define FILTER_COEFF_1_0 1000
+#define FILTER_COEFF_0_0 0
+#define SET_POSITIVE_SIGN(x) ((x) & (~SIGN))
+
+/**
+ * DOC: Content Adaptive Sharpness Filter (CASF)
+ *
+ * Starting from LNL, the display engine supports a content
+ * adaptive sharpening filter that enhances image quality.
+ * The display hardware uses the second pipe scaler to
+ * implement CASF, so pipe scaling cannot be used while
+ * sharpness is enabled.
+ * The filter operates on a region of pixels whose size is
+ * determined by the tap size. The coefficients are used to
+ * generate an alpha value which blends the sharpened image
+ * with the original image.
+ */
+
+/* Default LUT values to be loaded one time. */
+static const u16 sharpness_lut[] = {
+ 4095, 2047, 1364, 1022, 816, 678, 579,
+ 504, 444, 397, 357, 323, 293, 268, 244, 224,
+ 204, 187, 170, 154, 139, 125, 111, 98, 85,
+ 73, 60, 48, 36, 24, 12, 0
+};
+
+static const u16 filtercoeff_1[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_0, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_0,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_2[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_0,
+};
+
+static const u16 filtercoeff_3[] = {
+ FILTER_COEFF_0_125, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_125,
+};
+
+static void intel_casf_filter_lut_load(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int i;
+
+ intel_de_write(display, SHRPLUT_INDEX(crtc->pipe),
+ INDEX_AUTO_INCR | INDEX_VALUE(0));
+
+ for (i = 0; i < ARRAY_SIZE(sharpness_lut); i++)
+ intel_de_write(display, SHRPLUT_DATA(crtc->pipe),
+ sharpness_lut[i]);
+}
+
+void intel_casf_update_strength(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int win_size;
+
+ intel_de_rmw(display, SHARPNESS_CTL(crtc->pipe), FILTER_STRENGTH_MASK,
+ FILTER_STRENGTH(crtc_state->hw.casf_params.strength));
+
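+ /*
+ * Rewrite the (unchanged) scaler window size, presumably to arm the
+ * double-buffered scaler registers so the new strength takes effect.
+ */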
+ win_size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, 1));
+
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, 1), win_size);
+}
+
+static void intel_casf_compute_win_size(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->hw.adjusted_mode;
+ u32 total_pixels = mode->hdisplay * mode->vdisplay;
+
+ if (total_pixels <= MAX_PIXELS_FOR_3_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_3X3;
+ else if (total_pixels <= MAX_PIXELS_FOR_5_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_5X5;
+ else
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_7X7;
+}
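+
+/*
+ * Illustrative mapping (example modes, not from the change itself): 1920x1080
+ * selects the 3x3 filter, 2560x1440 falls into the 5x5 range, and anything
+ * above 3840x2160 (e.g. 5120x2880) uses the 7x7 filter.
+ */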
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!HAS_CASF(display))
+ return 0;
+
+ if (crtc_state->uapi.sharpness_strength == 0) {
+ crtc_state->hw.casf_params.casf_enable = false;
+ crtc_state->hw.casf_params.strength = 0;
+ return 0;
+ }
+
+ crtc_state->hw.casf_params.casf_enable = true;
+
+ /*
+ * HW takes a value in the form (1.0 + strength) in 4.4 fixed point format.
+ * Strength ranges from 0.0 to 14.9375, i.e. from 0 to 239.
+ * The user can give a value from 0 to 255, but it is clamped to 239.
+ * E.g. the user gives 85, which is 5.3125; adding 1.0 gives 6.3125.
+ * 6.3125 in 4.4 format is b01100101, which equals 101.
+ * Equivalently, 85 + 16 = 101.
+ */
+ crtc_state->hw.casf_params.strength =
+ min(crtc_state->uapi.sharpness_strength, 0xEF) + 0x10;
+
+ intel_casf_compute_win_size(crtc_state);
+
+ intel_casf_scaler_compute_config(crtc_state);
+
+ return 0;
+}
+
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharp;
+
+ sharp = intel_de_read(display, SHARPNESS_CTL(crtc->pipe));
+ if (sharp & FILTER_EN) {
+ if (drm_WARN_ON(display->drm,
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp) < 16))
+ crtc_state->hw.casf_params.strength = 0;
+ else
+ crtc_state->hw.casf_params.strength =
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp);
+ crtc_state->hw.casf_params.casf_enable = true;
+ crtc_state->hw.casf_params.win_size =
+ REG_FIELD_GET(FILTER_SIZE_MASK, sharp);
+ }
+}
+
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.casf_params.casf_enable)
+ return true;
+
+ return false;
+}
+
+static int casf_coeff_tap(int i)
+{
+ return i % SCALER_FILTER_NUM_TAPS;
+}
+
+static u32 casf_coeff(struct intel_crtc_state *crtc_state, int t)
+{
+ struct scaler_filter_coeff value;
+ u32 coeff;
+
+ value = crtc_state->hw.casf_params.coeff[t];
+ value.sign = 0;
+
+ coeff = value.sign << 15 | value.exp << 12 | value.mantissa << 3;
+ return coeff;
+}
+
+/*
+ * 17 phases of 7 taps require 119 coefficients, in 60 dwords per set.
+ * To enable CASF, program the scaler coefficients with the coefficients
+ * that are calculated and stored in hw.casf_params.coeff as per
+ * SCALER_COEFFICIENT_FORMAT.
+ */
+static void intel_casf_write_coeff(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int id = crtc_state->scaler_state.scaler_id;
+ int i;
+
+ if (id != 1) {
+ drm_WARN(display->drm, 1, "Second scaler not enabled\n");
+ return;
+ }
+
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(crtc->pipe, id, 0),
+ PS_COEF_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * SCALER_FILTER_NUM_TAPS; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = casf_coeff_tap(i);
+ tmp = casf_coeff(crtc_state, t);
+
+ t = casf_coeff_tap(i + 1);
+ tmp |= casf_coeff(crtc_state, t) << 16;
+
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(crtc->pipe, id, 0),
+ tmp);
+ }
+}
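+
+/*
+ * Worked example of the packing above (illustrative): dword 0 carries taps 0
+ * and 1, and since 7 is odd the phases straddle dword boundaries (e.g. dword 3
+ * carries tap 6 of one phase and tap 0 of the next). Only 119 of the 120
+ * half-dword slots are needed, so the upper half of the last dword repeats
+ * tap 0 as padding.
+ */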
+
+static void convert_sharpness_coef_binary(struct scaler_filter_coeff *coeff,
+ u16 coefficient)
+{
+ if (coefficient < 25) {
+ coeff->mantissa = (coefficient * 2048) / 100;
+ coeff->exp = 3;
+ } else if (coefficient < 50) {
+ coeff->mantissa = (coefficient * 1024) / 100;
+ coeff->exp = 2;
+ } else if (coefficient < 100) {
+ coeff->mantissa = (coefficient * 512) / 100;
+ coeff->exp = 1;
+ } else {
+ coeff->mantissa = (coefficient * 256) / 100;
+ coeff->exp = 0;
+ }
+}
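+
+/*
+ * Worked example (illustrative): the coefficient argument is a percentage.
+ * coefficient = 36 (0.36) takes the "< 50" branch: mantissa = 36 * 1024 / 100
+ * = 368, exp = 2, i.e. 368/1024 ~= 0.36. coefficient = 18 (0.18) takes the
+ * "< 25" branch: mantissa = 18 * 2048 / 100 = 368, exp = 3, i.e.
+ * 368/2048 ~= 0.18.
+ */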
+
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state)
+{
+ const u16 *filtercoeff;
+ u16 filter_coeff[SCALER_FILTER_NUM_TAPS];
+ u16 sumcoeff = 0;
+ int i;
+
+ if (crtc_state->hw.casf_params.win_size == 0)
+ filtercoeff = filtercoeff_1;
+ else if (crtc_state->hw.casf_params.win_size == 1)
+ filtercoeff = filtercoeff_2;
+ else
+ filtercoeff = filtercoeff_3;
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++)
+ sumcoeff += *(filtercoeff + i);
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) {
+ filter_coeff[i] = (*(filtercoeff + i) * 100 / sumcoeff);
+ convert_sharpness_coef_binary(&crtc_state->hw.casf_params.coeff[i],
+ filter_coeff[i]);
+ }
+}
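+
+/*
+ * Worked example (illustrative): for the 7x7 case filtercoeff_3 is
+ * {125, 250, 500, 1000, 500, 250, 125}, i.e. 0.125..1.0 scaled by 1000, so
+ * sumcoeff = 2750 and the normalized percentages after integer division are
+ * {4, 9, 18, 36, 18, 9, 4}, which convert_sharpness_coef_binary() then packs
+ * into the sign/exp/mantissa format consumed by casf_coeff().
+ */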
+
+void intel_casf_enable(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharpness_ctl;
+
+ intel_casf_filter_lut_load(crtc, crtc_state);
+
+ intel_casf_write_coeff(crtc_state);
+
+ sharpness_ctl = FILTER_EN | FILTER_STRENGTH(crtc_state->hw.casf_params.strength);
+
+ sharpness_ctl |= crtc_state->hw.casf_params.win_size;
+
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), sharpness_ctl);
+
+ skl_scaler_setup_casf(crtc_state);
+}
+
+void intel_casf_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_de_write(display, SKL_PS_CTRL(crtc->pipe, 1), 0);
+ intel_de_write(display, SKL_PS_WIN_POS(crtc->pipe, 1), 0);
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), 0);
+ intel_de_write(display, SKL_PS_WIN_SZ(crtc->pipe, 1), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_casf.h b/drivers/gpu/drm/i915/display/intel_casf.h
new file mode 100644
index 000000000000..b3fb0bcb3f5b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_H__
+#define __INTEL_CASF_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state);
+void intel_casf_update_strength(struct intel_crtc_state *new_crtc_state);
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state);
+void intel_casf_enable(struct intel_crtc_state *crtc_state);
+void intel_casf_disable(const struct intel_crtc_state *crtc_state);
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state);
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_CASF_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_casf_regs.h b/drivers/gpu/drm/i915/display/intel_casf_regs.h
new file mode 100644
index 000000000000..87803cca510f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_REGS_H__
+#define __INTEL_CASF_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _SHARPNESS_CTL_A 0x682B0
+#define _SHARPNESS_CTL_B 0x68AB0
+#define SHARPNESS_CTL(pipe) _MMIO_PIPE(pipe, _SHARPNESS_CTL_A, _SHARPNESS_CTL_B)
+#define FILTER_EN REG_BIT(31)
+#define FILTER_STRENGTH_MASK REG_GENMASK(15, 8)
+#define FILTER_STRENGTH(x) REG_FIELD_PREP(FILTER_STRENGTH_MASK, (x))
+#define FILTER_SIZE_MASK REG_GENMASK(1, 0)
+#define SHARPNESS_FILTER_SIZE_3X3 REG_FIELD_PREP(FILTER_SIZE_MASK, 0)
+#define SHARPNESS_FILTER_SIZE_5X5 REG_FIELD_PREP(FILTER_SIZE_MASK, 1)
+#define SHARPNESS_FILTER_SIZE_7X7 REG_FIELD_PREP(FILTER_SIZE_MASK, 2)
+
+#define _SHRPLUT_DATA_A 0x682B8
+#define _SHRPLUT_DATA_B 0x68AB8
+#define SHRPLUT_DATA(pipe) _MMIO_PIPE(pipe, _SHRPLUT_DATA_A, _SHRPLUT_DATA_B)
+
+#define _SHRPLUT_INDEX_A 0x682B4
+#define _SHRPLUT_INDEX_B 0x68AB4
+#define SHRPLUT_INDEX(pipe) _MMIO_PIPE(pipe, _SHRPLUT_INDEX_A, _SHRPLUT_INDEX_B)
+#define INDEX_AUTO_INCR REG_BIT(10)
+#define INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define INDEX_VALUE(x) REG_FIELD_PREP(INDEX_VALUE_MASK, (x))
+
+#endif /* __INTEL_CASF_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 5a4c8c2410ae..37801c744b05 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,29 +22,35 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <linux/time.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
-#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_clock.h"
#include "vlv_dsi.h"
#include "vlv_sideband.h"
@@ -112,6 +118,45 @@
* dividers can be programmed correctly.
*/
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk, can be different from the
+ * logical configuration only when all crtc's are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk to satisfy DBUF bandwidth requirements */
+ int dbuf_bw_min_cdclk;
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+
+ /* bitmask of enabled pipes */
+ u8 enabled_pipes;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
+};
+
struct intel_cdclk_funcs {
void (*get_cdclk)(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
@@ -313,27 +358,26 @@ static unsigned int intel_hpll_vco(struct intel_display *display)
[4] = 2666667,
[5] = 4266667,
};
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const unsigned int *vco_table;
unsigned int vco;
u8 tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev_priv))
+ if (display->platform.gm45)
vco_table = ctg_vco;
- else if (IS_G45(dev_priv))
+ else if (display->platform.g45)
vco_table = elk_vco;
- else if (IS_I965GM(dev_priv))
+ else if (display->platform.i965gm)
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev_priv))
+ else if (display->platform.pineview)
vco_table = pnv_vco;
- else if (IS_G33(dev_priv))
+ else if (display->platform.g33)
vco_table = blb_vco;
else
return 0;
- tmp = intel_de_read(display,
- IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = intel_de_read(display, display->platform.pineview ||
+ display->platform.mobile ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -507,7 +551,6 @@ static void gm45_get_cdclk(struct intel_display *display,
static void hsw_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 lcpll = intel_de_read(display, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -517,7 +560,7 @@ static void hsw_get_cdclk(struct intel_display *display,
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
- else if (IS_HASWELL_ULT(dev_priv))
+ else if (display->platform.haswell_ult)
cdclk_config->cdclk = 337500;
else
cdclk_config->cdclk = 540000;
@@ -525,8 +568,7 @@ static void hsw_get_cdclk(struct intel_display *display,
static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ?
333333 : 320000;
/*
@@ -534,7 +576,7 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
+ if (display->platform.valleyview && min_cdclk > freq_320)
return 400000;
else if (min_cdclk > 266667)
return freq_320;
@@ -546,9 +588,7 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_VALLEYVIEW(dev_priv)) {
+ if (display->platform.valleyview) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
else if (cdclk >= 266667)
@@ -561,30 +601,23 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1;
}
}
static void vlv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
- cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_config->vco);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm);
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_punit_get(display->drm);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
+ vlv_punit_put(display->drm);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
@@ -594,17 +627,16 @@ static void vlv_get_cdclk(struct intel_display *display,
static void vlv_program_pfi_credits(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
default_credits = PFI_CREDIT(12);
else
default_credits = PFI_CREDIT(8);
- if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) {
/* CHV suggested value is 31 or 63 */
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
credits = PFI_CREDIT_63;
else
credits = PFI_CREDIT(15);
@@ -634,10 +666,10 @@ static void vlv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 400000:
@@ -657,45 +689,45 @@ static void vlv_set_cdclk(struct intel_display *display,
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_iosf_sb_get(dev_priv,
+ vlv_iosf_sb_get(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
if (cdclk == 400000) {
u32 divider;
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1,
cdclk) - 1;
/* adjust cdclk divider */
- val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
- vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
- CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
- 50))
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL),
+ (val & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
}
/* adjust self-refresh exit latency value */
- val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val = vlv_bunit_read(display->drm, BUNIT_REG_BISOC);
val &= ~0x7f;
/*
@@ -706,9 +738,9 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
- vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ vlv_bunit_write(display->drm, BUNIT_REG_BISOC, val);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
@@ -717,17 +749,17 @@ static void vlv_set_cdclk(struct intel_display *display,
vlv_program_pfi_credits(display);
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static void chv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 333333:
@@ -746,27 +778,27 @@ static void chv_set_cdclk(struct intel_display *display,
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_punit_get(dev_priv);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ vlv_punit_get(display->drm);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- vlv_punit_put(dev_priv);
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
+
+ vlv_punit_put(display->drm);
intel_update_cdclk(display);
vlv_program_pfi_credits(display);
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -844,7 +876,6 @@ static void bdw_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int ret;
@@ -857,7 +888,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -871,8 +902,9 @@ static void bdw_set_cdclk(struct intel_display *display,
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 100))
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 100);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
intel_de_rmw(display, LCPLL_CTL,
@@ -881,12 +913,13 @@ static void bdw_set_cdclk(struct intel_display *display,
intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1079,7 +1112,7 @@ static void skl_dpll0_enable(struct intel_display *display, int vco)
intel_de_rmw(display, LCPLL1_CTL,
0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "DPLL0 not locked\n");
display->cdclk.hw.vco = vco;
@@ -1093,7 +1126,7 @@ static void skl_dpll0_disable(struct intel_display *display)
intel_de_rmw(display, LCPLL1_CTL,
LCPLL_PLL_ENABLE, 0);
- if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "Couldn't disable DPLL0\n");
display->cdclk.hw.vco = 0;
@@ -1126,7 +1159,6 @@ static void skl_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
@@ -1141,12 +1173,12 @@ static void skl_set_cdclk(struct intel_display *display,
* minimum 308MHz CDCLK.
*/
drm_WARN_ON_ONCE(display->drm,
- IS_SKYLAKE(dev_priv) && vco == 8640000);
+ display->platform.skylake && vco == 8640000);
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1189,8 +1221,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
@@ -1501,6 +1533,41 @@ static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals xe3p_lpd_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 151200, .ratio = 21, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 176400, .ratio = 21, .waveform = 0xaa54 },
+ { .refclk = 38400, .cdclk = 201600, .ratio = 21, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 226800, .ratio = 21, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 252000, .ratio = 21, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 277200, .ratio = 21, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 302400, .ratio = 21, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 327600, .ratio = 21, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 352800, .ratio = 21, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 378000, .ratio = 21, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 710400, .ratio = 37, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 729600, .ratio = 38, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 748800, .ratio = 39, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 768000, .ratio = 40, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 787200, .ratio = 41, .waveform = 0xffff },
+ {}
+};
+
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
@@ -1528,7 +1595,7 @@ static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk)
drm_WARN(display->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
min_cdclk, display->cdclk.hw.ref);
- return 0;
+ return display->cdclk.max_cdclk_freq;
}
static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk)
@@ -1661,10 +1728,9 @@ static void icl_readout_refclk(struct intel_display *display,
static void bxt_de_pll_readout(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val, ratio;
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
cdclk_config->ref = 38400;
else if (DISPLAY_VER(display) >= 11)
icl_readout_refclk(display, cdclk_config);
@@ -1767,8 +1833,8 @@ static void bxt_de_pll_disable(struct intel_display *display)
intel_de_write(display, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1784,8 +1850,8 @@ static void bxt_de_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_de_wait_for_set(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1797,7 +1863,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display)
BXT_DE_PLL_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1815,7 +1881,7 @@ static void icl_cdclk_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1835,8 +1901,8 @@ static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
drm_err(display->drm, "timeout waiting for FREQ change request ack\n");
val &= ~BXT_DE_PLL_FREQ_REQ;
@@ -1974,9 +2040,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2056,11 +2120,9 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct intel_display *displa
static bool pll_enable_wa_needed(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
return (DISPLAY_VERx100(display) == 2000 ||
DISPLAY_VERx100(display) == 1400 ||
- IS_DG2(dev_priv)) &&
+ display->platform.dg2) &&
display->cdclk.hw.vco > 0;
}
@@ -2068,7 +2130,6 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u16 waveform;
@@ -2083,7 +2144,7 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
- if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
@@ -2132,7 +2193,6 @@ static void bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_config mid_cdclk_config;
int cdclk = cdclk_config->cdclk;
int ret = 0;
@@ -2143,21 +2203,21 @@ static void bxt_set_cdclk(struct intel_display *display,
* mailbox communication, skip
* this step.
*/
- if (DISPLAY_VER(display) >= 14 || IS_DG2(dev_priv))
- /* NOOP */;
+ if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
+ ; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2185,9 +2245,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* NOOP - No Pcode communication needed for
* Display versions 14 and beyond
*/;
- else if (DISPLAY_VER(display) >= 11 && !IS_DG2(dev_priv))
- ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
+ ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -2195,10 +2255,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level,
- 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2249,7 +2308,7 @@ static void bxt_sanitize_cdclk(struct intel_display *display)
/*
* Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
+ * dividers both syncing to an active pipe, or asynchronously
* (PIPE_NONE).
*/
cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE);
@@ -2317,9 +2376,7 @@ static void bxt_cdclk_uninit_hw(struct intel_display *display)
*/
void intel_cdclk_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton)
bxt_cdclk_init_hw(display);
else if (DISPLAY_VER(display) == 9)
skl_cdclk_init_hw(display);
@@ -2334,9 +2391,7 @@ void intel_cdclk_init_hw(struct intel_display *display)
*/
void intel_cdclk_uninit_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton)
bxt_cdclk_uninit_hw(display);
else if (DISPLAY_VER(display) == 9)
skl_cdclk_uninit_hw(display);
@@ -2437,10 +2492,8 @@ static bool intel_cdclk_can_cd2x_update(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/* Older hw doesn't have the capability */
- if (DISPLAY_VER(display) < 10 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) < 10 && !display->platform.broxton)
return false;
/*
@@ -2490,11 +2543,10 @@ static void intel_pcode_notify(struct intel_display *display,
bool cdclk_update_valid,
bool pipe_count_update_valid)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 update_mask = 0;
- if (!IS_DG2(i915))
+ if (!display->platform.dg2)
return;
update_mask = DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, active_pipe_count, voltage_level);
@@ -2505,11 +2557,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -2520,7 +2572,6 @@ static void intel_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe, const char *context)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config))
@@ -2537,7 +2588,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_pause(intel_dp);
}
- intel_audio_cdclk_change_pre(dev_priv);
+ intel_audio_cdclk_change_pre(display);
/*
* Lock aux/gmbus while we change cdclk in case those
@@ -2567,7 +2618,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_resume(intel_dp);
}
- intel_audio_cdclk_change_post(dev_priv);
+ intel_audio_cdclk_change_post(display);
if (drm_WARN(display->drm,
intel_cdclk_changed(&display->cdclk.hw, cdclk_config),
@@ -2577,6 +2628,12 @@ static void intel_set_cdclk(struct intel_display *display,
}
}
+static int dg2_power_well_count(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.dg2 ? hweight8(cdclk_state->active_pipes) : 0;
+}
+
static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -2589,16 +2646,16 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual) &&
- new_cdclk_state->active_pipes ==
- old_cdclk_state->active_pipes)
+ dg2_power_well_count(display, old_cdclk_state) ==
+ dg2_power_well_count(display, new_cdclk_state))
return;
/* According to "Sequence Before Frequency Change", voltage level set to 0x3 */
voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX;
change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) >
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) >
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence Before Frequency Change",
@@ -2616,7 +2673,7 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
* no action if it is decreasing, before the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
change_cdclk, update_pipe_count);
@@ -2636,8 +2693,8 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
voltage_level = new_cdclk_state->actual.voltage_level;
update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) <
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) <
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence After Frequency Change",
@@ -2653,7 +2710,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
* no action if it is increasing, after the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
update_cdclk, update_pipe_count);
@@ -2681,7 +2738,6 @@ void
intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2689,11 +2745,14 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
intel_cdclk_pcode_pre_notify(state);
if (new_cdclk_state->disable_pipes) {
@@ -2735,18 +2794,20 @@ void
intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
intel_cdclk_pcode_post_notify(state);
if (!new_cdclk_state->disable_pipes &&
@@ -2770,27 +2831,29 @@ static int intel_cdclk_ppc(struct intel_display *display, bool double_wide)
/* max pixel rate as % of CDCLK (not accounting for PPC) */
static int intel_cdclk_guardband(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ display->platform.broadwell || display->platform.haswell)
return 100;
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
return 95;
else
return 90;
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+static int _intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state, int pixel_rate)
{
struct intel_display *display = to_intel_display(crtc_state);
int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
int guardband = intel_cdclk_guardband(display);
- int pixel_rate = crtc_state->pixel_rate;
return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ return _intel_pixel_rate_to_cdclk(crtc_state, crtc_state->pixel_rate);
+}
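As a quick check of the conversion above, a standalone sketch of _intel_pixel_rate_to_cdclk()'s formula; the ppc and guardband arguments are example inputs standing in for intel_cdclk_ppc() and intel_cdclk_guardband(), not values read from hardware:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int pixel_rate_to_cdclk(int pixel_rate, int ppc, int guardband)
{
	/* pixel_rate <= cdclk * ppc * guardband / 100, solved for cdclk */
	return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}

int main(void)
{
	/* 594000 kHz pixel rate, 2 pixels/clock, 100% guardband -> 297000 kHz */
	printf("%d\n", pixel_rate_to_cdclk(594000, 2, 100));
	/* same rate with 1 pixel/clock and a 90% guardband -> 660000 kHz */
	printf("%d\n", pixel_rate_to_cdclk(594000, 1, 90));
	return 0;
}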
+
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2799,12 +2862,12 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int min_cdclk = 0;
for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
+ min_cdclk = max(min_cdclk, crtc_state->plane_min_cdclk[plane->id]);
return min_cdclk;
}
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -2812,6 +2875,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
+ min_cdclk = max(min_cdclk, intel_crtc_bw_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_fbc_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
@@ -2821,52 +2886,110 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_cdclk_update_crtc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_cdclk_state *cdclk_state =
- intel_atomic_get_new_cdclk_state(state);
- const struct intel_bw_state *bw_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- int min_cdclk, i;
- enum pipe pipe;
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (min_cdclk < 0)
- return min_cdclk;
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
- continue;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ old_min_cdclk = cdclk_state->min_cdclk[crtc->pipe];
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- bw_state = intel_atomic_get_new_bw_state(state);
- if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->bw_min_cdclk != min_cdclk) {
- int ret;
+ cdclk_state->min_cdclk[crtc->pipe] = new_min_cdclk;
- cdclk_state->bw_min_cdclk = min_cdclk;
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
- }
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] min cdclk: %d kHz -> %d kHz\n",
+ crtc->base.base.id, crtc->base.name,
+ old_min_cdclk, new_min_cdclk);
- min_cdclk = max(cdclk_state->force_min_cdclk,
- cdclk_state->bw_min_cdclk);
+ return 0;
+}
+
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ old_min_cdclk = cdclk_state->dbuf_bw_min_cdclk;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state->dbuf_bw_min_cdclk = new_min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "dbuf bandwidth min cdclk: %d kHz -> %d kHz\n",
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
+
+static bool glk_cdclk_audio_wa_needed(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.geminilake &&
+ cdclk_state->enabled_pipes &&
+ !is_power_of_2(cdclk_state->enabled_pipes);
+}
+
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe;
+ int min_cdclk;
+
+ min_cdclk = cdclk_state->force_min_cdclk;
+ min_cdclk = max(min_cdclk, cdclk_state->dbuf_bw_min_cdclk);
for_each_pipe(display, pipe)
min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
@@ -2878,8 +3001,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
- if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
- !is_power_of_2(cdclk_state->active_pipes))
+ if (glk_cdclk_audio_wa_needed(display, cdclk_state))
min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
@@ -3165,41 +3287,69 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
return to_intel_cdclk_state(cdclk_state);
}
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_cdclk_modeset_checks(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
+ struct intel_cdclk_state *new_cdclk_state;
int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially affected
- * planes are part of the state. We can now compute the minimum cdclk
- * for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
+ if (!intel_any_crtc_enable_changed(state) &&
+ !intel_any_crtc_active_changed(state))
+ return 0;
+
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->enabled_pipes =
+ intel_calc_enabled_pipes(state, old_cdclk_state->enabled_pipes);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
- ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!old_cdclk_state->active_pipes != !new_cdclk_state->active_pipes)
+ *need_cdclk_calc = true;
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ if (glk_cdclk_audio_wa_needed(display, old_cdclk_state) !=
+ glk_cdclk_audio_wa_needed(display, new_cdclk_state))
+ *need_cdclk_calc = true;
+
+ if (dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state))
*need_cdclk_calc = true;
return 0;
}
+static int intel_crtcs_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = intel_cdclk_update_crtc_min_cdclk(state, crtc,
+ old_crtc_state->min_cdclk,
+ new_crtc_state->min_cdclk,
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
{
struct intel_cdclk_state *cdclk_state;
@@ -3216,14 +3366,13 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi
int intel_cdclk_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &display->cdclk.obj,
+ intel_atomic_global_obj_init(display, &display->cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -3233,19 +3382,17 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
- hweight8(new_cdclk_state->active_pipes);
- bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
- &new_cdclk_state->actual);
/*
- * We need to poke hw for gen >= 12, because we notify PCode if
+ * We need to poke hw for DG2, because we notify PCode if
* pipe power well count changes.
*/
- return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed);
+ return intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual) ||
+ dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state);
}
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
@@ -3259,9 +3406,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state->active_pipes =
- intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
-
ret = intel_cdclk_modeset_calc_cdclk(state);
if (ret)
return ret;
@@ -3274,9 +3418,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
- intel_cdclk_changed(&old_cdclk_state->logical,
+ } else if (intel_cdclk_changed(&old_cdclk_state->logical,
&new_cdclk_state->logical)) {
ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
@@ -3358,6 +3500,81 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+int intel_cdclk_atomic_check(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
+ bool need_cdclk_calc = false;
+ int ret;
+
+ ret = intel_cdclk_modeset_checks(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_crtcs_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_dbuf_bw_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+
+ need_cdclk_calc = true;
+ }
+
+ if (need_cdclk_calc) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_cdclk_update_hw_state(struct intel_display *display)
+{
+ const struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(display->cdclk.obj.state);
+ struct intel_crtc *crtc;
+
+ cdclk_state->enabled_pipes = 0;
+ cdclk_state->active_pipes = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.enable)
+ cdclk_state->enabled_pipes |= BIT(pipe);
+ if (crtc_state->hw.active)
+ cdclk_state->active_pipes |= BIT(pipe);
+
+ cdclk_state->min_cdclk[pipe] = crtc_state->min_cdclk;
+ cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
+ }
+
+ cdclk_state->dbuf_bw_min_cdclk = intel_dbuf_bw_min_cdclk(display, dbuf_bw_state);
+}
+
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_cdclk_update_hw_state(display);
+}
+
static int intel_compute_max_dotclk(struct intel_display *display)
{
int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
@@ -3377,11 +3594,13 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->cdclk.max_cdclk_freq = 787200;
+ } else if (DISPLAY_VERx100(display) >= 3002) {
+ display->cdclk.max_cdclk_freq = 480000;
+ } else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
if (display->cdclk.hw.ref == 24000)
display->cdclk.max_cdclk_freq = 552000;
else
@@ -3391,9 +3610,9 @@ void intel_update_max_cdclk(struct intel_display *display)
display->cdclk.max_cdclk_freq = 648000;
else
display->cdclk.max_cdclk_freq = 652800;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (display->platform.geminilake) {
display->cdclk.max_cdclk_freq = 316800;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (display->platform.broxton) {
display->cdclk.max_cdclk_freq = 624000;
} else if (DISPLAY_VER(display) == 9) {
u32 limit = intel_de_read(display, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -3417,7 +3636,7 @@ void intel_update_max_cdclk(struct intel_display *display)
max_cdclk = 308571;
display->cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 Mhz for ULT.
@@ -3426,15 +3645,15 @@ void intel_update_max_cdclk(struct intel_display *display)
*/
if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT)
display->cdclk.max_cdclk_freq = 450000;
- else if (IS_BROADWELL_ULX(dev_priv))
+ else if (display->platform.broadwell_ulx)
display->cdclk.max_cdclk_freq = 450000;
- else if (IS_BROADWELL_ULT(dev_priv))
+ else if (display->platform.broadwell_ult)
display->cdclk.max_cdclk_freq = 540000;
else
display->cdclk.max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
display->cdclk.max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
display->cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -3458,8 +3677,6 @@ void intel_update_max_cdclk(struct intel_display *display)
*/
void intel_update_cdclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
intel_cdclk_get_cdclk(display, &display->cdclk.hw);
/*
@@ -3468,7 +3685,7 @@ void intel_update_cdclk(struct intel_display *display)
* of cdclk that generates 4MHz reference clock freq which is used to
* generate GMBus clock. This will vary with the cdclk freq.
*/
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
intel_de_write(display, GMBUSFREQ_VLV,
DIV_ROUND_UP(display->cdclk.hw.cdclk, 1000));
}
@@ -3487,7 +3704,6 @@ static int dg1_rawclk(struct intel_display *display)
static int cnp_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
u32 rawclk;
@@ -3507,7 +3723,7 @@ static int cnp_rawclk(struct intel_display *display)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -3520,21 +3736,12 @@ static int pch_rawclk(struct intel_display *display)
return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-static int vlv_hrawclk(struct intel_display *display)
-{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- /* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
- CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
static int i9xx_hrawclk(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
/* hrawclock is 1/4 the FSB frequency */
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4);
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4);
}
/**
@@ -3546,24 +3753,23 @@ static int i9xx_hrawclk(struct intel_display *display)
*/
u32 intel_read_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
freq = dg1_rawclk(display);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_CNP)
freq = cnp_rawclk(display);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- freq = vlv_hrawclk(display);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ freq = vlv_clock_get_hrawclk(display->drm);
else if (DISPLAY_VER(display) >= 3)
freq = i9xx_hrawclk(display);
else
@@ -3588,9 +3794,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
void intel_cdclk_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_cdclk_info", 0444, display->drm->debugfs_root,
display, &i915_cdclk_info_fops);
}
@@ -3743,9 +3947,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->funcs.cdclk = &xe3lpd_cdclk_funcs;
+ display->cdclk.table = xe3p_lpd_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 30) {
display->funcs.cdclk = &xe3lpd_cdclk_funcs;
display->cdclk.table = xe3lpd_cdclk_table;
} else if (DISPLAY_VER(display) >= 20) {
@@ -3757,80 +3962,80 @@ void intel_init_cdclk_hooks(struct intel_display *display)
} else if (DISPLAY_VER(display) >= 14) {
display->funcs.cdclk = &rplu_cdclk_funcs;
display->cdclk.table = mtl_cdclk_table;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = dg2_cdclk_table;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (display->platform.alderlake_p) {
/* Wa_22011320316:adl-p[a0] */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
display->cdclk.table = adlp_a_step_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
- } else if (IS_RAPTORLAKE_U(dev_priv)) {
+ } else if (display->platform.alderlake_p_raptorlake_u) {
display->cdclk.table = rplu_cdclk_table;
display->funcs.cdclk = &rplu_cdclk_funcs;
} else {
display->cdclk.table = adlp_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
}
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(display) >= 12) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
display->funcs.cdclk = &ehl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(display) >= 11) {
display->funcs.cdclk = &icl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
display->funcs.cdclk = &bxt_cdclk_funcs;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
display->cdclk.table = glk_cdclk_table;
else
display->cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(display) == 9) {
display->funcs.cdclk = &skl_cdclk_funcs;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
display->funcs.cdclk = &bdw_cdclk_funcs;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
display->funcs.cdclk = &hsw_cdclk_funcs;
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
display->funcs.cdclk = &chv_cdclk_funcs;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
display->funcs.cdclk = &vlv_cdclk_funcs;
- } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ } else if (display->platform.sandybridge || display->platform.ivybridge) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_IRONLAKE(dev_priv)) {
+ } else if (display->platform.ironlake) {
display->funcs.cdclk = &ilk_cdclk_funcs;
- } else if (IS_GM45(dev_priv)) {
+ } else if (display->platform.gm45) {
display->funcs.cdclk = &gm45_cdclk_funcs;
- } else if (IS_G45(dev_priv)) {
+ } else if (display->platform.g45) {
display->funcs.cdclk = &g33_cdclk_funcs;
- } else if (IS_I965GM(dev_priv)) {
+ } else if (display->platform.i965gm) {
display->funcs.cdclk = &i965gm_cdclk_funcs;
- } else if (IS_I965G(dev_priv)) {
+ } else if (display->platform.i965g) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
display->funcs.cdclk = &pnv_cdclk_funcs;
- } else if (IS_G33(dev_priv)) {
+ } else if (display->platform.g33) {
display->funcs.cdclk = &g33_cdclk_funcs;
- } else if (IS_I945GM(dev_priv)) {
+ } else if (display->platform.i945gm) {
display->funcs.cdclk = &i945gm_cdclk_funcs;
- } else if (IS_I945G(dev_priv)) {
+ } else if (display->platform.i945g) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_I915GM(dev_priv)) {
+ } else if (display->platform.i915gm) {
display->funcs.cdclk = &i915gm_cdclk_funcs;
- } else if (IS_I915G(dev_priv)) {
+ } else if (display->platform.i915g) {
display->funcs.cdclk = &i915g_cdclk_funcs;
- } else if (IS_I865G(dev_priv)) {
+ } else if (display->platform.i865g) {
display->funcs.cdclk = &i865g_cdclk_funcs;
- } else if (IS_I85X(dev_priv)) {
+ } else if (display->platform.i85x) {
display->funcs.cdclk = &i85x_cdclk_funcs;
- } else if (IS_I845G(dev_priv)) {
+ } else if (display->platform.i845g) {
display->funcs.cdclk = &i845g_cdclk_funcs;
- } else if (IS_I830(dev_priv)) {
+ } else if (display->platform.i830) {
display->funcs.cdclk = &i830_cdclk_funcs;
}
@@ -3838,3 +4043,127 @@ void intel_init_cdclk_hooks(struct intel_display *display)
"Unknown platform. Assuming i830\n"))
display->funcs.cdclk = &i830_cdclk_funcs;
}
+
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->logical.cdclk;
+}
+
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.cdclk;
+}
+
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.voltage_level;
+}
+
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe)
+{
+ return cdclk_state->min_cdclk[pipe];
+}
+
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
+
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
+ new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level))
+ return true;
+
+ return false;
+}
+
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk)
+{
+ cdclk_state->force_min_cdclk = force_min_cdclk;
+}
+
+void intel_cdclk_read_hw(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
+
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
+ cdclk_state->actual = display->cdclk.hw;
+ cdclk_state->logical = display->cdclk.hw;
+}
+
+static int calc_cdclk(const struct intel_crtc_state *crtc_state, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton) {
+ return bxt_calc_cdclk(display, min_cdclk);
+ } else if (DISPLAY_VER(display) == 9) {
+ int vco;
+
+ vco = display->cdclk.skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+
+ return skl_calc_cdclk(min_cdclk, vco);
+ } else if (display->platform.broadwell) {
+ return bdw_calc_cdclk(min_cdclk);
+ } else if (display->platform.cherryview || display->platform.valleyview) {
+ return vlv_calc_cdclk(display, min_cdclk);
+ } else {
+ return display->cdclk.max_cdclk_freq;
+ }
+}
+
+static unsigned int _intel_cdclk_prefill_adj(const struct intel_crtc_state *crtc_state,
+ int clock, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int cdclk = calc_cdclk(crtc_state, min_cdclk);
+
+ return min(0x10000, DIV_ROUND_UP_ULL((u64)clock << 16, ppc * cdclk));
+}
+
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /* FIXME use the actual min_cdclk for the pipe here */
+ return intel_cdclk_prefill_adjustment_worst(crtc_state);
+}
+
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ int clock = crtc_state->hw.pipe_mode.crtc_clock;
+ int min_cdclk;
+
+ /*
+ * FIXME could perhaps consider a few more of the factors
+	 * that go into the per-crtc min_cdclk. Namely anything that
+ * only changes during full modesets.
+ *
+ * FIXME this assumes 1:1 scaling, but the other _worst() stuff
+ * assumes max downscaling, so the final result will be
+ * unrealistically bad. Figure out where the actual maximum value
+ * lies and use that to compute a more realistic worst case
+ * estimate...
+ */
+ min_cdclk = _intel_pixel_rate_to_cdclk(crtc_state, clock);
+
+ return _intel_cdclk_prefill_adj(crtc_state, clock, min_cdclk);
+}
+
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, prefill_lines_unadjusted),
+ ppc * prefill_lines_available);
+}
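A standalone sketch of the two prefill helpers above, with plain 64-bit math standing in for DIV_ROUND_UP_ULL()/mul_u32_u32() and made-up inputs; it only illustrates the .16 fixed-point adjustment and the rounded-up minimum cdclk, not the real per-pipe inputs:

#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/* .16 fixed-point ratio of pixel clock to cdclk throughput, capped at 1.0 */
static unsigned int prefill_adj(unsigned int clock, unsigned int ppc, unsigned int cdclk)
{
	uint64_t adj = div_round_up_u64((uint64_t)clock << 16, (uint64_t)ppc * cdclk);

	return adj > 0x10000 ? 0x10000 : (unsigned int)adj;
}

/* smallest cdclk that fits the unadjusted prefill work into the available lines */
static unsigned int min_cdclk_for_prefill(unsigned int crtc_clock, unsigned int ppc,
					  unsigned int lines_unadjusted,
					  unsigned int lines_available)
{
	return div_round_up_u64((uint64_t)crtc_clock * lines_unadjusted,
				(uint64_t)ppc * lines_available);
}

int main(void)
{
	/* 300 MHz pixel clock, 2 ppc, 240 MHz cdclk -> 300000<<16 / 480000 = 0xa000 (0.625) */
	printf("0x%x\n", prefill_adj(300000, 2, 240000));
	/* 12 lines of prefill work, 8 lines of time -> needs at least 225000 kHz cdclk */
	printf("%u\n", min_cdclk_for_prefill(300000, 2, 12, 8));
	return 0;
}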
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 6b0e7a41eba3..1ff7d078b42c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,10 +8,10 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-
+enum pipe;
struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -22,43 +22,6 @@ struct intel_cdclk_config {
bool joined_mbus;
};
-struct intel_cdclk_state {
- struct intel_global_state base;
-
- /*
- * Logical configuration of cdclk (used for all scaling,
- * watermark, etc. calculations and checks). This is
- * computed as if all enabled crtcs were active.
- */
- struct intel_cdclk_config logical;
-
- /*
- * Actual configuration of cdclk, can be different from the
- * logical configuration only when all crtc's are DPMS off.
- */
- struct intel_cdclk_config actual;
-
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
-
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
-
- /* forced minimum cdclk for glk+ audio w/a */
- int force_min_cdclk;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /* update cdclk with pipes disabled */
- bool disable_pipes;
-};
-
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -75,14 +38,17 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context);
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state);
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+void intel_cdclk_update_hw_state(struct intel_display *display);
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
@@ -95,4 +61,20 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
int intel_cdclk_init(struct intel_display *display);
void intel_cdclk_debugfs_register(struct intel_display *display);
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
+void intel_cdclk_read_hw(struct intel_display *display);
+
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available);
+
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
new file mode 100644
index 000000000000..165138b95cb2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "intel_cmtg.h"
+#include "intel_cmtg_regs.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_device.h"
+#include "intel_display_power.h"
+#include "intel_display_regs.h"
+
+/**
+ * DOC: Common Primary Timing Generator (CMTG)
+ *
+ * The CMTG is a timing generator that runs in parallel to the transcoders'
+ * timing generators (TGs) to provide a synchronization mechanism where the CMTG
+ * acts as primary and the transcoder TGs act as secondary. The CMTG outputs
+ * its TG start and frame sync signals to the transcoders that are configured
+ * as secondary, which use those signals to synchronize their own timing with
+ * the CMTG's.
+ *
+ * The CMTG can be used only with eDP or MIPI command mode and supports the
+ * following use cases:
+ *
+ * - Dual eDP: The CMTG can be used to keep two eDP TGs in sync when on a
+ * dual eDP configuration (with or without PSR/PSR2 enabled).
+ *
+ * - Single eDP as secondary: It is also possible to use a single eDP
+ * configuration with the transcoder TG as secondary to the CMTG. That would
+ * allow a flow that would not require a modeset on the existing eDP when a
+ * new eDP is added for a dual eDP configuration with CMTG.
+ *
+ * - DC6v: In DC6v, the transcoder might be off but the CMTG keeps running to
+ *   maintain frame timings. When exiting DC6v, the transcoder TG is then
+ *   synced back to the CMTG.
+ *
+ * Currently, the driver does not use the CMTG, but we need to make sure that
+ * we disable it in case we inherit a display configuration with it enabled.
+ */
+
+/*
+ * We describe here only the minimum data required to allow us to properly
+ * disable the CMTG if necessary.
+ */
+struct intel_cmtg_config {
+ bool cmtg_a_enable;
+ /*
+ * Xe2_LPD adds a second CMTG that can be used for dual eDP async mode.
+ */
+ bool cmtg_b_enable;
+ bool trans_a_secondary;
+ bool trans_b_secondary;
+};
+
+static bool intel_cmtg_has_cmtg_b(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 20;
+}
+
+static bool intel_cmtg_has_clock_sel(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 14;
+}
+
+static void intel_cmtg_dump_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ drm_dbg_kms(display->drm,
+ "CMTG readout: CMTG A: %s, CMTG B: %s, Transcoder A secondary: %s, Transcoder B secondary: %s\n",
+ str_enabled_disabled(cmtg_config->cmtg_a_enable),
+ intel_cmtg_has_cmtg_b(display) ? str_enabled_disabled(cmtg_config->cmtg_b_enable) : "n/a",
+ str_yes_no(cmtg_config->trans_a_secondary),
+ str_yes_no(cmtg_config->trans_b_secondary));
+}
+
+static bool intel_cmtg_transcoder_is_secondary(struct intel_display *display,
+ enum transcoder trans)
+{
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 val = 0;
+
+ if (!HAS_TRANSCODER(display, trans))
+ return false;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(trans);
+
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, trans));
+
+ return val & CMTG_SECONDARY_MODE;
+}
+
+static void intel_cmtg_get_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 val;
+
+ val = intel_de_read(display, TRANS_CMTG_CTL_A);
+ cmtg_config->cmtg_a_enable = val & CMTG_ENABLE;
+
+ if (intel_cmtg_has_cmtg_b(display)) {
+ val = intel_de_read(display, TRANS_CMTG_CTL_B);
+ cmtg_config->cmtg_b_enable = val & CMTG_ENABLE;
+ }
+
+ cmtg_config->trans_a_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_A);
+ cmtg_config->trans_b_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_B);
+}
+
+static bool intel_cmtg_disable_requires_modeset(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ if (DISPLAY_VER(display) >= 20)
+ return false;
+
+ return cmtg_config->trans_a_secondary || cmtg_config->trans_b_secondary;
+}
+
+static void intel_cmtg_disable(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 clk_sel_clr = 0;
+ u32 clk_sel_set = 0;
+
+ if (cmtg_config->trans_a_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_A),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->trans_b_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_B),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->cmtg_a_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG A\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_A, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_A_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_A_DISABLED;
+ }
+
+ if (cmtg_config->cmtg_b_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG B\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_B, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_B_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_B_DISABLED;
+ }
+
+ if (intel_cmtg_has_clock_sel(display) && clk_sel_clr)
+ intel_de_rmw(display, CMTG_CLK_SEL, clk_sel_clr, clk_sel_set);
+}
+
+/*
+ * Read out CMTG configuration and, on platforms that allow disabling it without
+ * a modeset, do it.
+ *
+ * This function must be called before any port PLL is disabled in the general
+ * sanitization process, because we need whatever port PLL is providing the
+ * clock for the CMTG to be on before accessing CMTG registers.
+ */
+void intel_cmtg_sanitize(struct intel_display *display)
+{
+ struct intel_cmtg_config cmtg_config = {};
+
+ if (!HAS_CMTG(display))
+ return;
+
+ intel_cmtg_get_config(display, &cmtg_config);
+ intel_cmtg_dump_config(display, &cmtg_config);
+
+ /*
+ * FIXME: The driver is not prepared to handle cases where a modeset is
+ * required for disabling the CMTG: we need a proper way of tracking
+	 * CMTG state and do the right synchronization with respect to triggering
+ * the modeset as part of the disable sequence.
+ */
+ if (intel_cmtg_disable_requires_modeset(display, &cmtg_config))
+ return;
+
+ intel_cmtg_disable(display, &cmtg_config);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.h b/drivers/gpu/drm/i915/display/intel_cmtg.h
new file mode 100644
index 000000000000..ba62199adaa2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_H__
+#define __INTEL_CMTG_H__
+
+struct intel_display;
+
+void intel_cmtg_sanitize(struct intel_display *display);
+
+#endif /* __INTEL_CMTG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
new file mode 100644
index 000000000000..945a35578284
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_REGS_H__
+#define __INTEL_CMTG_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define CMTG_CLK_SEL _MMIO(0x46160)
+#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
+#define CMTG_CLK_SEL_A_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_A_MASK, 0)
+#define CMTG_CLK_SEL_B_MASK REG_GENMASK(15, 13)
+#define CMTG_CLK_SEL_B_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_B_MASK, 0)
+
+#define TRANS_CMTG_CTL_A _MMIO(0x6fa88)
+#define TRANS_CMTG_CTL_B _MMIO(0x6fb88)
+#define CMTG_ENABLE REG_BIT(31)
+
+#endif /* __INTEL_CMTG_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 7cd902bbd244..e7950655434b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,12 +22,18 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsb.h"
+#include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"
struct intel_color_funcs {
int (*color_check)(struct intel_atomic_state *state,
@@ -83,6 +89,14 @@ struct intel_color_funcs {
* Read config other than LUTs and CSCs, before them. Optional.
*/
void (*get_config)(struct intel_crtc_state *crtc_state);
+
+	/* Plane CSC */
+ void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+
+ /* Plane Pre/Post CSC */
+ void (*load_plane_luts)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -403,14 +417,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -514,7 +527,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -536,7 +548,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -607,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
if (CTM_COEFF_NEGATIVE(coeff))
c = -c;
+ int_bits = max(int_bits, 1);
+
c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
(s64)(BIT(int_bits + frac_bits - 1) - 1));
@@ -997,7 +1011,7 @@ static void skl_color_commit_noarm(struct intel_dsb *dsb,
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
- * register programming will be peformed from skl_color_commit_arm()
+ * register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
@@ -1088,18 +1102,19 @@ static void skl_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 tmp;
crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
crtc_state->csc_mode = ilk_read_csc_mode(crtc);
- tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
+ if (DISPLAY_VER(display) < 35) {
+ u32 tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
- if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
- crtc_state->gamma_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
- if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
- crtc_state->csc_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+ }
}
static void skl_color_commit_arm(struct intel_dsb *dsb,
@@ -1337,8 +1352,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1348,8 +1363,8 @@ static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1387,7 +1402,7 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1915,7 +1930,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
return;
display->funcs.color->load_luts(crtc_state);
@@ -1963,6 +1978,25 @@ void intel_color_modeset(const struct intel_crtc_state *crtc_state)
}
}
+bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->dsb_color;
+}
+
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && !HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1980,43 +2014,53 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
- if (!crtc_state->dsb_color_vblank)
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
+ else
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+
+ if (!intel_color_uses_dsb(crtc_state))
return;
display->funcs.color->load_luts(crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
- intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
+ intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
+ intel_dsb_wait_for_delayed_vblank(state, crtc_state->dsb_color);
+ intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
+ intel_dsb_interrupt(crtc_state->dsb_color);
+ }
- intel_dsb_finish(crtc_state->dsb_color_vblank);
+ if (intel_color_uses_gosub_dsb(crtc_state))
+ intel_dsb_gosub_finish(crtc_state->dsb_color);
+ else
+ intel_dsb_finish(crtc_state->dsb_color);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank) {
- intel_dsb_cleanup(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_vblank = NULL;
+ if (crtc_state->dsb_color) {
+ intel_dsb_cleanup(crtc_state->dsb_color);
+ crtc_state->dsb_color = NULL;
}
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank)
- intel_dsb_wait(crtc_state->dsb_color_vblank);
-}
-
-bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->dsb_color_vblank;
+ if (crtc_state->dsb_color)
+ intel_dsb_wait(crtc_state->dsb_color);
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ return false;
+
return !old_crtc_state->post_csc_lut &&
!old_crtc_state->pre_csc_lut;
}
@@ -3804,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
}
}
+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_property_blob *blob = plane_state->hw.ctm;
+ struct drm_color_ctm_3x4 *ctm;
+ const u64 *input;
+ u16 coeffs[9] = {};
+ int i, j;
+
+ if (!icl_is_hdr_plane(display, plane) || !blob)
+ return;
+
+ ctm = blob->data;
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[j]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+ /* Skip postoffs */
+ if (!((j + 2) % 4))
+ j += 2;
+ else
+ j++;
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+ coeffs[0] << 16 | coeffs[1]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+ coeffs[2] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+ coeffs[3] << 16 | coeffs[4]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+ coeffs[5] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+ coeffs[6] << 16 | coeffs[7]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+ coeffs[8] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+ /*
+ * Conversion from S31.32 to S0.12. BIT[12] is the signed bit
+ */
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 0),
+ ctm_to_twos_complement(input[3], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 1),
+ ctm_to_twos_complement(input[7], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 2),
+ ctm_to_twos_complement(input[11], 0, 12));
+}
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+ u32 i, lut_size;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ lut_size = 128;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+
+ if (pre_csc_lut) {
+ for (i = 0; i < lut_size; i++) {
+ u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Program the max register to clamp values > 1.0. */
+ /* TODO: Restrict to 0x7ffffff */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+ } while (i++ < 130);
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 130);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+ u32 i, lut_size, lut_val;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ /* TODO: Add macro */
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ if (post_csc_lut) {
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Segment 2 */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+ } while (i++ < 34);
+ } else {
+ /* TODO: Add for segment 0 */
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 34);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.degamma_lut)
+ xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+ if (plane_state->hw.gamma_lut)
+ xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+ return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+ struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ const struct drm_color_lut32 *lut = blob->data;
+ int i, lut_size = drm_color_lut32_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++)
+ intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ if (enable)
+ val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
+
+ intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
+}
+
static const struct intel_color_funcs chv_color_funcs = {
.color_check = chv_color_check,
.color_commit_arm = i9xx_color_commit_arm,
@@ -3851,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
.lut_equal = icl_lut_equal,
.read_csc = icl_read_csc,
.get_config = skl_get_config,
+ .load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
+ .load_plane_luts = xelpd_plane_load_luts,
};
static const struct intel_color_funcs icl_color_funcs = {
@@ -3931,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
.get_config = ilk_get_config,
};
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
+}
+
+static void
+intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_csc_matrix)
+ display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
+}
+
+static void
+intel_color_load_plane_luts(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_luts)
+ display->funcs.color->load_plane_luts(dsb, plane_state);
+}
+
+bool
+intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
+{
+ if (DISPLAY_VER(display) >= 12)
+ return pipe == PIPE_A || pipe == PIPE_B;
+ else
+ return false;
+}
+
+static void
+intel_color_load_3dlut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
+}
+
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.ctm)
+ intel_color_load_plane_csc_matrix(dsb, plane_state);
+ if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
+ intel_color_load_plane_luts(dsb, plane_state);
+ if (plane_state->hw.lut_3d)
+ intel_color_load_3dlut(dsb, plane_state);
+}
+
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
@@ -3977,12 +4344,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -3999,7 +4364,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 9d66457c1e89..c21b9bdf7bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -13,7 +13,9 @@ struct intel_crtc_state;
struct intel_crtc;
struct intel_display;
struct intel_dsb;
+struct intel_plane_state;
struct drm_property_blob;
+enum pipe;
void intel_color_init_hooks(struct intel_display *display);
int intel_color_init(struct intel_display *display);
@@ -24,6 +26,8 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
@@ -38,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
-
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
new file mode 100644
index 000000000000..942d9b9c93ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_color.h"
+#include "intel_colorop.h"
+#include "intel_color_pipeline.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "skl_universal_plane.h"
+
+#define MAX_COLOR_PIPELINES 1
+#define PLANE_DEGAMMA_SIZE 128
+#define PLANE_GAMMA_SIZE 32
+
+static
+int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
+ enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_colorop *prev_op;
+ struct intel_colorop *colorop;
+ int ret;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_DEGAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+
+ if (ret)
+ return ret;
+
+ list->type = colorop->base.base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
+
+ /* TODO: handle failures and clean up */
+ prev_op = &colorop->base;
+
+ if (DISPLAY_VER(display) >= 35 &&
+ intel_color_crtc_has_3dlut(display, pipe) &&
+ plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
+
+ ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ true);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ prev_op = &colorop->base;
+ }
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+ ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+ prev_op = &colorop->base;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_GAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ return 0;
+}
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ int len = 0;
+ int ret;
+
+ /* Currently expose pipeline only for HDR planes */
+ if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
+ return 0;
+
+ /* Add pipeline consisting of transfer functions */
+ ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
+ if (ret)
+ return ret;
+ len++;
+
+ return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+}
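
The per-plane pipeline built above is a fixed chain: a 128-entry pre-CSC 1D LUT, an optional 17-point 3D LUT (primary plane only, display version 35+), a 3x4 CTM, and a 32-entry post-CSC 1D LUT. A small sketch of the sizes implied by those parameters; the names are placeholders, only the order and counts come from the code above:

/* Placeholder enum documenting the stage order created by
 * _intel_color_pipeline_plane_init(); not a kernel type. */
enum plane_color_stage {
	STAGE_PRE_CSC_LUT,	/* 1D LUT, PLANE_DEGAMMA_SIZE = 128 entries */
	STAGE_LUT_3D,		/* optional, 17x17x17 grid */
	STAGE_CSC,		/* 3x4 color transform matrix */
	STAGE_POST_CSC_LUT,	/* 1D LUT, PLANE_GAMMA_SIZE = 32 entries */
};

#define LUT_3D_POINTS 17

static unsigned int lut_3d_entries(void)
{
	/* drm_plane_colorop_3dlut_init(..., 17, ...) => 17^3 samples */
	return LUT_3D_POINTS * LUT_3D_POINTS * LUT_3D_POINTS;	/* 4913 */
}
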
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.h b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
new file mode 100644
index 000000000000..a457d306da7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_PIPELINE_H__
+#define __INTEL_COLOR_PIPELINE_H__
+
+struct drm_plane;
+enum pipe;
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
+
+#endif /* __INTEL_COLOR_PIPELINE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 8eb643cfead7..c370b6029369 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -316,4 +316,33 @@
#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
+/* 3D LUT */
+#define _LUT_3D_CTL_A 0x490A4
+#define _LUT_3D_CTL_B 0x491A4
+#define LUT_3D_CTL(pipe) _MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
+#define LUT_3D_ENABLE REG_BIT(31)
+#define LUT_3D_READY REG_BIT(30)
+#define LUT_3D_BINDING_MASK REG_GENMASK(23, 22)
+#define LUT_3D_BIND_PIPE REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
+#define LUT_3D_BIND_PLANE_1 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
+#define LUT_3D_BIND_PLANE_2 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
+#define LUT_3D_BIND_PLANE_3 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
+
+#define _LUT_3D_INDEX_A 0x490A8
+#define _LUT_3D_INDEX_B 0x491A8
+#define LUT_3D_INDEX(pipe) _MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
+#define LUT_3D_AUTO_INCREMENT REG_BIT(13)
+#define LUT_3D_INDEX_VALUE_MASK REG_GENMASK(12, 0)
+#define LUT_3D_INDEX_VALUE(x) REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
+
+#define _LUT_3D_DATA_A 0x490AC
+#define _LUT_3D_DATA_B 0x491AC
+#define LUT_3D_DATA(pipe) _MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
+#define LUT_3D_DATA_RED_MASK REG_GENMASK(29, 20)
+#define LUT_3D_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define LUT_3D_DATA_BLUE_MASK REG_GENMASK(9, 0)
+#define LUT_3D_DATA_RED(x) REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
+#define LUT_3D_DATA_GREEN(x) REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
+#define LUT_3D_DATA_BLUE(x) REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
+
#endif /* __INTEL_COLOR_REGS_H__ */
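
The LUT_3D_DATA layout above carries one 10-bit sample per channel in a single dword. A standalone sketch of that packing using plain shifts instead of the REG_FIELD_PREP() helpers (the function name is illustrative):

#include <stdint.h>

/* Pack one 3D LUT grid point into LUT_3D_DATA: red in bits 29:20,
 * green in bits 19:10, blue in bits 9:0, each truncated to 10 bits. */
static uint32_t pack_lut_3d_data(uint16_t red, uint16_t green, uint16_t blue)
{
	return ((uint32_t)(red   & 0x3ff) << 20) |
	       ((uint32_t)(green & 0x3ff) << 10) |
	       ((uint32_t)(blue  & 0x3ff) << 0);
}
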
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.c b/drivers/gpu/drm/i915/display/intel_colorop.c
new file mode 100644
index 000000000000..f2fc0d8780ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_colorop.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
+{
+ return container_of(colorop, struct intel_colorop, base);
+}
+
+struct intel_colorop *intel_colorop_alloc(void)
+{
+ struct intel_colorop *colorop;
+
+ colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
+ if (!colorop)
+ return ERR_PTR(-ENOMEM);
+
+ return colorop;
+}
+
+struct intel_colorop *intel_colorop_create(enum intel_color_block id)
+{
+ struct intel_colorop *colorop;
+
+ colorop = intel_colorop_alloc();
+
+ if (IS_ERR(colorop))
+ return colorop;
+
+ colorop->id = id;
+
+ return colorop;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.h b/drivers/gpu/drm/i915/display/intel_colorop.h
new file mode 100644
index 000000000000..21d58eb9f3d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOROP_H__
+#define __INTEL_COLOROP_H__
+
+#include "intel_display_types.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
+struct intel_colorop *intel_colorop_alloc(void);
+struct intel_colorop *intel_colorop_create(enum intel_color_block id);
+
+#endif /* __INTEL_COLOROP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 3252dab56430..f401558ac14e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,19 +3,22 @@
* Copyright © 2018 Intel Corporation
*/
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
-#define for_each_combo_phy(__dev_priv, __phy) \
+#define for_each_combo_phy(__display, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
- for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+ for_each_if(intel_phy_is_combo(__display, __phy))
-#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+#define for_each_combo_phy_reverse(__display, __phy) \
for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
- for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+ for_each_if(intel_phy_is_combo(__display, __phy))
enum {
PROCMON_0_85V_DOT_0,
@@ -52,11 +55,11 @@ static const struct icl_procmon {
};
static const struct icl_procmon *
-icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
+icl_get_procmon_ref_values(struct intel_display *display, enum phy phy)
{
u32 val;
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
+ val = intel_de_read(display, ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -74,57 +77,57 @@ icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
}
}
-static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+static void icl_set_procmon_ref_values(struct intel_display *display,
enum phy phy)
{
const struct icl_procmon *procmon;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ intel_de_rmw(display, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
- intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
+ intel_de_write(display, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(display, ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
-static bool check_phy_reg(struct drm_i915_private *dev_priv,
+static bool check_phy_reg(struct intel_display *display,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
if ((val & mask) != expected_val) {
- drm_dbg(&dev_priv->drm,
- "Combo PHY %c reg %08x state mismatch: "
- "current %08x mask %08x expected %08x\n",
- phy_name(phy),
- reg.reg, val, mask, expected_val);
+ drm_dbg_kms(display->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
return false;
}
return true;
}
-static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+static bool icl_verify_procmon_ref_values(struct intel_display *display,
enum phy phy)
{
const struct icl_procmon *procmon;
bool ret;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
+ ret = check_phy_reg(display, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
}
-static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
+static bool has_phy_misc(struct intel_display *display, enum phy phy)
{
/*
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
@@ -135,32 +138,30 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
* that we program it for PHY A.
*/
- if (IS_ALDERLAKE_S(i915))
+ if (display->platform.alderlake_s)
return phy == PHY_A;
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) ||
- IS_ROCKETLAKE(i915) ||
- IS_DG1(i915))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) ||
+ display->platform.rocketlake ||
+ display->platform.dg1)
return phy < PHY_C;
return true;
}
-static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_enabled(struct intel_display *display,
enum phy phy)
{
/* The PHY C added by EHL has no PHY_MISC register */
- if (!has_phy_misc(dev_priv, phy))
- return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ if (!has_phy_misc(display, phy))
+ return intel_de_read(display, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
- return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
+ return !(intel_de_read(display, ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+ (intel_de_read(display, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
-static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+static bool ehl_vbt_ddi_d_present(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
bool ddi_a_present = intel_bios_is_port_present(display, PORT_A);
bool ddi_d_present = intel_bios_is_port_present(display, PORT_D);
bool dsi_present = intel_bios_is_dsi_present(display, NULL);
@@ -180,13 +181,13 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
* in the log and let the internal display win.
*/
if (ddi_d_present)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
return false;
}
-static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
+static bool phy_is_master(struct intel_display *display, enum phy phy)
{
/*
* Certain PHYs are connected to compensation resistors and act
@@ -206,64 +207,64 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
*/
if (phy == PHY_A)
return true;
- else if (IS_ALDERLAKE_S(dev_priv))
+ else if (display->platform.alderlake_s)
return phy == PHY_D;
- else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.dg1 || display->platform.rocketlake)
return phy == PHY_C;
return false;
}
-static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_verify_state(struct intel_display *display,
enum phy phy)
{
bool ret = true;
u32 expected_val = 0;
- if (!icl_combo_phy_enabled(dev_priv, phy))
+ if (!icl_combo_phy_enabled(display, phy))
return false;
- if (DISPLAY_VER(dev_priv) >= 12) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
+ if (DISPLAY_VER(display) >= 12) {
+ ret &= check_phy_reg(display, phy, ICL_PORT_TX_DW8_LN(0, phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_PCS_DW1_LN(0, phy),
DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
- ret &= icl_verify_procmon_ref_values(dev_priv, phy);
+ ret &= icl_verify_procmon_ref_values(display, phy);
- if (phy_is_master(dev_priv, phy)) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
+ if (phy_is_master(display, phy)) {
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (ehl_vbt_ddi_d_present(dev_priv))
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (ehl_vbt_ddi_d_present(display))
expected_val = ICL_PHY_MISC_MUX_DDID;
- ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ret &= check_phy_reg(display, phy, ICL_PHY_MISC(phy),
ICL_PHY_MISC_MUX_DDID,
expected_val);
}
}
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_power_up_lanes(struct intel_display *display,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
if (is_dsi) {
- drm_WARN_ON(&dev_priv->drm, lane_reversal);
+ drm_WARN_ON(display->drm, lane_reversal);
switch (lane_count) {
case 1:
@@ -301,28 +302,28 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy),
PWR_DOWN_LN_MASK, lane_mask);
}
-static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_init(struct intel_display *display)
{
enum phy phy;
- for_each_combo_phy(dev_priv, phy) {
+ for_each_combo_phy(display, phy) {
const struct icl_procmon *procmon;
u32 val;
- if (icl_combo_phy_verify_state(dev_priv, phy))
+ if (icl_combo_phy_verify_state(display, phy))
continue;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- drm_dbg(&dev_priv->drm,
- "Initializing combo PHY %c (Voltage/Process Info : %s)\n",
- phy_name(phy), procmon->name);
+ drm_dbg_kms(display->drm,
+ "Initializing combo PHY %c (Voltage/Process Info : %s)\n",
+ phy_name(phy), procmon->name);
- if (!has_phy_misc(dev_priv, phy))
+ if (!has_phy_misc(display, phy))
goto skip_phy_misc;
/*
@@ -333,84 +334,84 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ val = intel_de_read(display, ICL_PHY_MISC(phy));
+ if ((display->platform.jasperlake || display->platform.elkhartlake) &&
phy == PHY_A) {
val &= ~ICL_PHY_MISC_MUX_DDID;
- if (ehl_vbt_ddi_d_present(dev_priv))
+ if (ehl_vbt_ddi_d_present(display))
val |= ICL_PHY_MISC_MUX_DDID;
}
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_write(display, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
+ if (DISPLAY_VER(display) >= 12) {
+ val = intel_de_read(display, ICL_PORT_TX_DW8_LN(0, phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
- intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW8_GRP(phy), val);
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
val |= RUN_DCC_ONCE;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
}
- icl_set_procmon_ref_values(dev_priv, phy);
+ icl_set_procmon_ref_values(display, phy);
- if (phy_is_master(dev_priv, phy))
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ if (phy_is_master(display, phy))
+ intel_de_rmw(display, ICL_PORT_COMP_DW8(phy),
0, IREFGEN);
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, CL_POWER_DOWN_ENABLE);
}
}
-static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_uninit(struct intel_display *display)
{
enum phy phy;
- for_each_combo_phy_reverse(dev_priv, phy) {
+ for_each_combo_phy_reverse(display, phy) {
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy)) {
- if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ !icl_combo_phy_verify_state(display, phy)) {
+ if (display->platform.tigerlake || display->platform.dg1) {
/*
* A known problem with old ifwi:
* https://gitlab.freedesktop.org/drm/intel/-/issues/2411
* Suppress the warning for CI. Remove ASAP!
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
} else {
- drm_warn(&dev_priv->drm,
+ drm_warn(display->drm,
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
}
}
- if (!has_phy_misc(dev_priv, phy))
+ if (!has_phy_misc(display, phy))
goto skip_phy_misc;
- intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ intel_de_rmw(display, ICL_PHY_MISC(phy), 0,
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
+ intel_de_rmw(display, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
-void intel_combo_phy_init(struct drm_i915_private *i915)
+void intel_combo_phy_init(struct intel_display *display)
{
- icl_combo_phys_init(i915);
+ icl_combo_phys_init(display);
}
-void intel_combo_phy_uninit(struct drm_i915_private *i915)
+void intel_combo_phy_uninit(struct intel_display *display)
{
- icl_combo_phys_uninit(i915);
+ icl_combo_phys_uninit(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index 660886f86c59..3f5dba78e533 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-struct drm_i915_private;
enum phy;
+struct intel_display;
-void intel_combo_phy_init(struct drm_i915_private *dev_priv);
-void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_init(struct intel_display *display);
+void intel_combo_phy_uninit(struct intel_display *display);
+void intel_combo_phy_power_up_lanes(struct intel_display *display,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 0964e392d02c..3694f95376c2 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_COMBO_PHY_REGS__
#define __INTEL_COMBO_PHY_REGS__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
@@ -133,6 +133,8 @@
#define TX_TRAINING_EN REG_BIT(31)
#define TAP2_DISABLE REG_BIT(30)
#define TAP3_DISABLE REG_BIT(29)
+#define CURSOR_PROGRAM REG_BIT(26)
+#define COEFF_POLARITY REG_BIT(25)
#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index c65887870ddc..913d90a7a508 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -28,16 +28,57 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
-#include "intel_backlight.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_panel.h"
-int intel_connector_init(struct intel_connector *connector)
+static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(work, typeof(*connector),
+ modeset_retry_work);
+ struct intel_display *display = to_intel_display(connector);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
+ connector->base.name);
+
+ /* Grab the locks before changing the connector property */
+ mutex_lock(&display->drm->mode_config.mutex);
+ /* Set connector link status to BAD and send a Uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_connector_set_link_status_property(&connector->base,
+ DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&display->drm->mode_config.mutex);
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_connector_hotplug_event(&connector->base);
+
+ drm_connector_put(&connector->base);
+}
+
+void intel_connector_queue_modeset_retry_work(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+
+ drm_connector_get(&connector->base);
+ if (!queue_work(display->wq.unordered, &connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
+void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector)
+{
+ if (cancel_work_sync(&connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
+static int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
@@ -56,6 +97,9 @@ int intel_connector_init(struct intel_connector *connector)
intel_panel_init_alloc(connector);
+ INIT_WORK(&connector->modeset_retry_work,
+ intel_connector_modeset_retry_work_fn);
+
return 0;
}
@@ -103,41 +147,42 @@ void intel_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
- if (intel_connector->port)
- drm_dp_mst_put_port_malloc(intel_connector->port);
+ if (intel_connector->mst.port)
+ drm_dp_mst_put_port_malloc(intel_connector->mst.port);
kfree(connector);
}
-int intel_connector_register(struct drm_connector *connector)
+int intel_connector_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
- ret = intel_backlight_device_register(intel_connector);
+ ret = intel_panel_register(connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
- goto err_backlight;
+ goto err_panel;
}
- intel_connector_debugfs_add(intel_connector);
+ intel_connector_debugfs_add(connector);
return 0;
-err_backlight:
- intel_backlight_device_unregister(intel_connector);
+err_panel:
+ intel_panel_unregister(connector);
err:
return ret;
}
-void intel_connector_unregister(struct drm_connector *connector)
+void intel_connector_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_backlight_device_unregister(intel_connector);
+ intel_panel_unregister(connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -162,10 +207,9 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -222,20 +266,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -249,20 +292,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -294,14 +336,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index bafde3f11ff4..0aa86626e646 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -14,7 +14,6 @@ struct i2c_adapter;
struct intel_connector;
struct intel_encoder;
-int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
@@ -33,5 +32,7 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
void intel_attach_dp_colorspace_property(struct drm_connector *connector);
void intel_attach_scaling_mode_property(struct drm_connector *connector);
+void intel_connector_queue_modeset_retry_work(struct intel_connector *connector);
+void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4634d3fd9f20..82e89cdbe5a5 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -31,11 +31,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -44,6 +42,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -51,9 +50,11 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
+#include "intel_link_bw.h"
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
+#include "intel_pfit.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
@@ -90,13 +91,12 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
@@ -108,19 +108,18 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_crt_port_enabled(display, crt->adpa_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -177,7 +176,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
int mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -194,14 +192,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_CPT(display))
adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+ if (!HAS_PCH_SPLIT(display))
intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
@@ -251,11 +249,10 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
}
static void hsw_post_disable_crt(struct intel_atomic_state *state,
@@ -265,7 +262,6 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_crtc_vblank_off(old_crtc_state);
@@ -285,7 +281,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
@@ -294,11 +290,10 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
}
static void hsw_pre_enable_crt(struct intel_atomic_state *state,
@@ -307,13 +302,12 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
hsw_fdi_link_train(encoder, crtc_state);
@@ -326,7 +320,6 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -344,8 +337,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
static void intel_enable_crt(struct intel_atomic_state *state,
@@ -358,24 +351,23 @@ static void intel_enable_crt(struct intel_atomic_state *state,
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
- status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
@@ -392,7 +384,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev_priv) &&
+ if (HAS_PCH_LPT(display) &&
ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
@@ -430,7 +422,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -443,7 +435,6 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -456,13 +447,13 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
@@ -487,13 +478,12 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ bool turn_off_dac = HAS_PCH_SPLIT(display);
u32 save_adpa;
crt->force_hotplug_required = false;
@@ -508,10 +498,10 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
- 1000))
+ if (intel_de_wait_for_clear_ms(display,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
+ 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
@@ -537,8 +527,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -555,7 +543,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -565,8 +553,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display, crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
+ if (intel_de_wait_for_clear_ms(display, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
intel_de_write(display, crt->adpa_reg, save_adpa);
@@ -582,8 +570,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
@@ -591,15 +578,14 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return ilk_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return valleyview_crt_detect_hotplug(connector);
/*
@@ -607,19 +593,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G45(dev_priv))
+ if (display->platform.g45)
tries = 2;
else
tries = 1;
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display),
- CRT_HOTPLUG_FORCE_DETECT, 1000))
+ if (intel_de_wait_for_clear_ms(display, PORT_HOTPLUG_EN(display),
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_DETECT to go off");
}
@@ -632,7 +618,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
@@ -745,8 +731,10 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
transconf | TRANSCONF_FORCE_BORDER);
intel_de_posting_read(display,
TRANSCONF(display, cpu_transcoder));
- /* Wait for next Vblank to substitue
- * border color for Color info */
+ /*
+ * Wait for next Vblank to substitute
+ * border color for Color info.
+ */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
st00 = intel_de_read8(display, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
@@ -856,7 +844,6 @@ intel_crt_detect(struct drm_connector *connector,
bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
@@ -874,7 +861,7 @@ intel_crt_detect(struct drm_connector *connector,
return connector->status;
if (display->params.load_detect_test) {
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
goto load_detect;
}
@@ -882,9 +869,9 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -908,7 +895,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -939,7 +926,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return status;
}
@@ -947,7 +934,6 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
@@ -957,10 +943,10 @@ static int intel_crt_get_modes(struct drm_connector *connector)
if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
- if (ret || !IS_G4X(dev_priv))
+ if (ret || !display->platform.g4x)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -968,7 +954,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, ddc);
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -1019,16 +1005,15 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector;
struct intel_crt *crt;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -1076,7 +1061,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
@@ -1088,7 +1073,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
@@ -1099,7 +1084,7 @@ void intel_crt_init(struct intel_display *display)
connector->base.polled = connector->polled;
if (HAS_DDI(display)) {
- assert_port_valid(dev_priv, PORT_E);
+ assert_port_valid(display, PORT_E);
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
@@ -1116,7 +1101,7 @@ void intel_crt_init(struct intel_display *display)
intel_ddi_buf_trans_init(&crt->base);
} else {
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
@@ -1138,7 +1123,7 @@ void intel_crt_init(struct intel_display *display)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
index 9a93020b9a7e..571a67ae9afa 100644
--- a/drivers/gpu/drm/i915/display/intel_crt_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -45,4 +45,6 @@
#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
+#define _VGA_MSR_WRITE _MMIO(0x3c2)
+
#endif /* __INTEL_CRT_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index a2c528d707f4..9d2a23c96c61 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,14 +9,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
+#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
@@ -28,6 +29,7 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
@@ -44,9 +46,9 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
+struct intel_crtc *intel_first_crtc(struct intel_display *display)
{
- return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
+ return to_intel_crtc(drm_crtc_from_index(display->drm, 0));
}
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
@@ -67,10 +69,9 @@ void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
drm_crtc_wait_one_vblank(&crtc->base);
}
-void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->active)
@@ -84,18 +85,23 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!crtc->active)
return 0;
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ if (!vblank->max_vblank_count) {
+ /* On preempt-rt we cannot take the vblank spinlock since this function is called from tracepoints */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return (u32)drm_crtc_vblank_count(&crtc->base);
+ else
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ }
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
- * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
+ * From Gen 11, in case of dsi cmd mode, frame counter wouldn't
* have updated at the beginning of TE, if we want to use
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
@@ -108,13 +114,13 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
* On i965gm the hardware frame counter reads
* zero when the TV encoder is enabled :(
*/
- if (IS_I965GM(dev_priv) &&
+ if (display->platform.i965gm &&
(crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
return 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return 0xffffffff; /* full 32 bit counter */
- else if (DISPLAY_VER(dev_priv) >= 3)
+ else if (DISPLAY_VER(display) >= 3)
return 0xffffff; /* only 24 bits of frame count */
else
return 0; /* Gen2 doesn't have a hardware frame counter */
@@ -124,7 +130,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+ crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
@@ -141,8 +147,8 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
/*
* Should really happen exactly when we disable the pipe
@@ -154,9 +160,9 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
- crtc->block_dc_for_vblank = false;
+ crtc->vblank_psr_notify = false;
- flush_work(&display->irq.vblank_dc_work);
+ flush_work(&display->irq.vblank_notify_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -303,7 +309,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
-int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
@@ -315,27 +321,27 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
return PTR_ERR(crtc);
crtc->pipe = pipe;
- crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+ crtc->num_scalers = DISPLAY_RUNTIME_INFO(display)->num_scalers[pipe];
- if (DISPLAY_VER(dev_priv) >= 9)
- primary = skl_universal_plane_create(dev_priv, pipe, PLANE_1);
+ if (DISPLAY_VER(display) >= 9)
+ primary = skl_universal_plane_create(display, pipe, PLANE_1);
else
- primary = intel_primary_plane_create(dev_priv, pipe);
+ primary = intel_primary_plane_create(display, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
crtc->plane_ids_mask |= BIT(primary->id);
- intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+ intel_init_fifo_underrun_reporting(display, crtc, false);
- for_each_sprite(dev_priv, pipe, sprite) {
+ for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
- if (DISPLAY_VER(dev_priv) >= 9)
- plane = skl_universal_plane_create(dev_priv, pipe, PLANE_2 + sprite);
+ if (DISPLAY_VER(display) >= 9)
+ plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ plane = intel_sprite_plane_create(display, pipe, sprite);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
@@ -343,39 +349,41 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
crtc->plane_ids_mask |= BIT(plane->id);
}
- cursor = intel_cursor_plane_create(dev_priv, pipe);
+ cursor = intel_cursor_plane_create(display, pipe);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto fail;
}
crtc->plane_ids_mask |= BIT(cursor->id);
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.cherryview ||
+ display->platform.valleyview ||
+ display->platform.g4x)
funcs = &g4x_crtc_funcs;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ else if (display->platform.i945gm ||
+ display->platform.i915gm)
funcs = &i915gm_crtc_funcs;
- else if (DISPLAY_VER(dev_priv) == 3)
+ else if (DISPLAY_VER(display) == 3)
funcs = &i915_crtc_funcs;
else
funcs = &i8xx_crtc_funcs;
} else {
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
funcs = &bdw_crtc_funcs;
else
funcs = &ilk_crtc_funcs;
}
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ ret = drm_crtc_init_with_planes(display->drm, &crtc->base,
&primary->base, &cursor->base,
funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
drm_crtc_create_scaling_filter_property(&crtc->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
@@ -386,7 +394,10 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+ drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ if (HAS_CASF(display))
+ drm_crtc_create_sharpness_strength_property(&crtc->base);
return 0;
@@ -415,10 +426,13 @@ int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+
return crtc_state->hw.active &&
!crtc_state->preload_luts &&
!intel_crtc_needs_modeset(crtc_state) &&
- intel_crtc_needs_color_update(crtc_state) &&
+ (intel_crtc_needs_color_update(crtc_state) &&
+ !HAS_DOUBLE_BUFFERED_LUT(display)) &&
!intel_color_uses_dsb(crtc_state) &&
!crtc_state->use_dsb;
}
@@ -511,7 +525,7 @@ int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -519,6 +533,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_vblank_evade_ctx evade;
int scanline;
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
intel_psr_lock(new_crtc_state);
if (new_crtc_state->do_async_flip) {
@@ -545,7 +561,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
- if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
+ if (drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
/*
@@ -648,6 +664,7 @@ void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -656,6 +673,8 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
if (new_crtc_state->do_async_flip)
goto out;
@@ -665,7 +684,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* In case of mipi dsi command mode, we need to set frame update
* request for every commit.
*/
- if (DISPLAY_VER(dev_priv) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
icl_dsi_frame_update(new_crtc_state);
@@ -713,7 +732,8 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
- intel_vrr_send_push(new_crtc_state);
+ if (!state->base.legacy_cursor_update)
+ intel_vrr_send_push(NULL, new_crtc_state);
local_irq_enable();
@@ -722,7 +742,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
pipe_name(pipe), crtc->debug.start_vbl_count,
end_vbl_count,
@@ -737,3 +757,89 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
out:
intel_psr_unlock(new_crtc_state);
}
+
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.enable != new_crtc_state->hw.enable;
+}
+
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.active != new_crtc_state->hw.active;
+}
+
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_active_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(display) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+/* "Maximum Pipe Read Bandwidth" */
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512);
+}
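A minimal standalone sketch of the "Maximum Pipe Read Bandwidth" arithmetic used by intel_crtc_bw_min_cdclk() above, assuming (not stated in the hunk) that data_rate is in kB/s as elsewhere in the i915 bandwidth code, so the result is a cdclk floor in kHz, i.e. the pipe is modelled as reading at most 51.2 bytes per cdclk cycle; the helper names below are illustrative only:

/*
 * Sketch of: min_cdclk = DIV_ROUND_UP(data_rate * 10, 512)
 * Unit assumption: data_rate in kB/s -> result in kHz.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

static uint64_t bw_min_cdclk_khz(uint64_t data_rate_kbps)
{
	return div_round_up_u64(data_rate_kbps * 10, 512);
}

int main(void)
{
	/* one 3840x2160@60 ARGB8888 plane: 3840 * 2160 * 4 bytes * 60 Hz */
	uint64_t data_rate_kbps = 3840ULL * 2160 * 4 * 60 / 1000;

	printf("min cdclk ~= %llu kHz\n",
	       (unsigned long long)bw_min_cdclk_khz(data_rate_kbps));
	return 0;
}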
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index de54ae1deedf..07917e8a9ae3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -13,7 +13,6 @@ enum pipe;
struct drm_device;
struct drm_display_mode;
struct drm_file;
-struct drm_i915_private;
struct drm_pending_vblank_event;
struct intel_atomic_state;
struct intel_crtc;
@@ -38,7 +37,7 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
struct drm_pending_vblank_event **event);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
-int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+int intel_crtc_init(struct intel_display *display, enum pipe pipe);
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@@ -52,11 +51,22 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
-struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+struct intel_crtc *intel_first_crtc(struct intel_display *display);
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
enum pipe pipe);
-void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe);
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state);
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state);
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state);
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state);
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 1faef60be472..c2a6217c2262 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,11 +5,13 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -41,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -135,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -151,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -175,8 +177,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -186,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -248,11 +250,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
str_enabled_disabled(pipe_config->has_sel_update),
str_enabled_disabled(pipe_config->has_panel_replay),
str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "minimum HBlank: %d\n", pipe_config->min_hblank);
}
- drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
-
drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -286,13 +286,23 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ drm_printf(&p, "scanline offset: %d\n",
+ intel_crtc_scanline_offset(pipe_config));
+
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d, set context latency: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay,
+ pipe_config->set_context_latency);
+
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
+ pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
+
+ drm_printf(&p, "vrr: vmin vblank: %d, vmax vblank: %d, vmin vtotal: %d, vmax vtotal: %d\n",
+ intel_vrr_vmin_vblank_start(pipe_config), intel_vrr_vmax_vblank_start(pipe_config),
+ intel_vrr_vmin_vtotal(pipe_config), intel_vrr_vmax_vtotal(pipe_config));
drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
@@ -302,21 +312,21 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
- drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d, min cdclk %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
+ pipe_config->pixel_rate, pipe_config->min_cdclk);
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -331,9 +341,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->ips_enabled, pipe_config->double_wide,
pipe_config->has_drrs);
- intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);
+ intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -344,24 +354,29 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
+ drm_printf(&p, "sharpness strength: %d, sharpness tap size: %d, sharpness enable: %d\n",
+ pipe_config->hw.casf_params.strength,
+ pipe_config->hw.casf_params.win_size,
+ pipe_config->hw.casf_params.casf_enable);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index ed88a28a3afa..a10b2425b94d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,19 +9,20 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vblank.h"
@@ -32,18 +33,9 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_surf_offset(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- u32 base;
-
- if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = plane_state->phys_dma_addr;
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->view.color_plane[0].offset;
+ return plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
@@ -91,8 +83,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
unsigned int rotation = plane_state->hw.rotation;
int src_x, src_y;
u32 offset;
@@ -113,8 +105,9 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
plane_state, 0);
if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] arbitrary cursor panning not supported\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -126,7 +119,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
src_x << 16, src_y << 16);
/* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ if (HAS_GMCH(display) && rotation & DRM_MODE_ROTATE_180) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -144,21 +137,23 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_rect src = plane_state->uapi.src;
const struct drm_rect dst = plane_state->uapi.dst;
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] cursor cannot be tiled\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
@@ -187,8 +182,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return 2048;
}
@@ -210,8 +205,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i845_cursor_ctl(const struct intel_plane_state *plane_state)
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
@@ -232,8 +226,9 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -246,14 +241,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor dimension %dx%d not supported\n",
+ plane->base.base.id, plane->base.name,
drm_rect_width(&plane_state->uapi.dst),
drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ drm_WARN_ON(display->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
switch (fb->pitches[0]) {
@@ -263,12 +259,13 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
case 2048:
break;
default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] invalid cursor stride (%u)\n",
+ plane->base.base.id, plane->base.name,
fb->pitches[0]);
return -EINVAL;
}
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i845_cursor_ctl(plane_state);
return 0;
}
@@ -279,7 +276,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 cntl = 0, base = 0, pos = 0, size = 0;
if (plane_state && plane_state->uapi.visible) {
@@ -291,7 +288,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -301,17 +298,17 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
if (plane->cursor.base != base ||
plane->cursor.size != size ||
plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(dev_priv, PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(dev_priv, PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE(dev_priv, PIPE_A), size);
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(dev_priv, PIPE_A), cntl);
+ intel_de_write_fw(display, CURCNTR(display, PIPE_A), 0);
+ intel_de_write_fw(display, CURBASE(display, PIPE_A), base);
+ intel_de_write_fw(display, CURSIZE(display, PIPE_A), size);
+ intel_de_write_fw(display, CURPOS(display, PIPE_A), pos);
+ intel_de_write_fw(display, CURCNTR(display, PIPE_A), cntl);
plane->cursor.base = base;
plane->cursor.size = size;
plane->cursor.cntl = cntl;
} else {
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, PIPE_A), pos);
+ intel_de_write_fw(display, CURPOS(display, PIPE_A), pos);
}
}
@@ -325,29 +322,29 @@ static void i845_cursor_disable_arm(struct intel_dsb *dsb,
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & CURSOR_ENABLE;
+ ret = intel_de_read(display, CURCNTR(display, PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return plane->base.dev->mode_config.cursor_width * 4;
}
@@ -371,16 +368,21 @@ static unsigned int i9xx_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_scanout_needs_vtd_wa(display))
+ return 64 * 1024;
+
return 4 * 1024; /* physical for i915/i945 */
}
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return cntl;
if (crtc_state->gamma_enable)
@@ -389,20 +391,18 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
cntl |= MCURSOR_PIPE_SEL(crtc->pipe);
return cntl;
}
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
u32 cntl = 0;
- if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (display->platform.sandybridge || display->platform.ivybridge)
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
switch (drm_rect_width(&plane_state->uapi.dst)) {
@@ -424,7 +424,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_ROTATE_180;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
+ if (DISPLAY_VER(display) == 13)
cntl |= MCURSOR_ARB_SLOTS(1);
return cntl;
@@ -432,8 +432,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
int width = drm_rect_width(&plane_state->uapi.dst);
int height = drm_rect_height(&plane_state->uapi.dst);
@@ -456,7 +455,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
* cursor is not rotated. Everything else requires square
* cursors.
*/
- if (HAS_CUR_FBC(dev_priv) &&
+ if (HAS_CUR_FBC(display) &&
plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
if (height < 8 || height > width)
return false;
@@ -471,8 +470,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int ret;
@@ -487,22 +486,23 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor dimension %dx%d not supported\n",
+ plane->base.base.id, plane->base.name,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ drm_WARN_ON(display->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] invalid cursor stride (%u) (cursor width %d)\n",
+ plane->base.base.id, plane->base.name,
+ fb->pitches[0], drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -516,14 +516,15 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* display power well must be turned off and on again.
* Refuse the put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ if (display->platform.cherryview && pipe == PIPE_C &&
plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor not allowed to straddle the left screen edge\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_cursor_ctl(plane_state);
return 0;
}
@@ -532,7 +533,7 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -546,8 +547,7 @@ static void wa_16021440873(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 ctl = plane_state->ctl;
int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
enum pipe pipe = plane->pipe;
@@ -557,7 +557,7 @@ static void wa_16021440873(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl);
- intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe),
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(display, pipe),
CURSOR_POS_Y(et_y_position));
}
@@ -566,8 +566,7 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -578,7 +577,7 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
u32 val = intel_cursor_position(crtc_state, plane_state,
true);
- intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val);
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(display, pipe), val);
}
intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl);
@@ -652,8 +651,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
@@ -664,10 +662,10 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (width != height)
+ if (DISPLAY_VER(display) < 14 && width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -679,7 +677,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
* CURPOS.
*
* On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additonally
+ * CURBASE write to arm the update. Additionally
* a write to any of the cursor register will cancel
* an already armed cursor update. Thus leaving out
* the CURBASE write after CURPOS could lead to a
@@ -691,7 +689,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
* the CURCNTR write arms the update.
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_write_cursor_wm(dsb, plane, crtc_state);
if (plane_state)
@@ -702,18 +700,18 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl);
- intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl);
- intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
- intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ if (HAS_CUR_FBC(display))
+ intel_de_write_dsb(display, dsb, CUR_FBC_CTL(display, pipe), fbc_ctl);
+ intel_de_write_dsb(display, dsb, CURCNTR(display, pipe), cntl);
+ intel_de_write_dsb(display, dsb, CURPOS(display, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(display, pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
- intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ intel_de_write_dsb(display, dsb, CURPOS(display, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(display, pipe), base);
}
}
@@ -727,7 +725,7 @@ static void i9xx_cursor_disable_arm(struct intel_dsb *dsb,
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
@@ -739,24 +737,45 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, CURCNTR(dev_priv, plane->pipe));
+ val = intel_de_read(display, CURCNTR(display, plane->pipe));
ret = val & MCURSOR_MODE_MASK;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
*pipe = plane->pipe;
else
*pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
+static void g4x_cursor_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, CURCNTR(display, crtc->pipe));
+ error->surf = intel_de_read(display, CURBASE(display, crtc->pipe));
+ error->surflive = intel_de_read(display, CURSURFLIVE(display, crtc->pipe));
+}
+
+static void i9xx_cursor_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, CURCNTR(display, crtc->pipe));
+ error->surf = intel_de_read(display, CURBASE(display, crtc->pipe));
+}
+
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -789,7 +808,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
struct intel_plane_state *new_plane_state;
@@ -864,7 +883,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(new_plane_state);
+ ret = intel_plane_pin_fb(new_plane_state, old_plane_state);
if (ret)
goto out_free;
@@ -893,7 +912,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
intel_psr_lock(crtc_state);
- if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
+ if (!drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base))) {
/*
* TODO: maybe check if we're still in PSR
* and skip the vblank evasion entirely?
@@ -959,8 +978,8 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_mode_config *config = &i915->drm.mode_config;
+ struct intel_display *display = to_intel_display(plane);
+ const struct drm_mode_config *config = &display->drm->mode_config;
struct drm_plane_size_hint hints[4];
int size, max_size, num_hints = 0;
@@ -968,7 +987,7 @@ static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
/* for simplicity only enumerate the supported square+POT sizes */
for (size = 64; size <= max_size; size *= 2) {
- if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints)))
+ if (drm_WARN_ON(display->drm, num_hints >= ARRAY_SIZE(hints)))
break;
hints[num_hints].width = size;
@@ -980,7 +999,7 @@ static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
}
struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+intel_cursor_plane_create(struct intel_display *display,
enum pipe pipe)
{
struct intel_plane *cursor;
@@ -996,7 +1015,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ if (display->platform.i845g || display->platform.i865g) {
cursor->max_stride = i845_cursor_max_stride;
cursor->min_alignment = i845_cursor_min_alignment;
cursor->update_arm = i845_cursor_update_arm;
@@ -1006,28 +1025,38 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
} else {
cursor->max_stride = i9xx_cursor_max_stride;
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
cursor->min_alignment = i830_cursor_min_alignment;
- else if (IS_I85X(dev_priv))
+ else if (display->platform.i85x)
cursor->min_alignment = i85x_cursor_min_alignment;
else
cursor->min_alignment = i9xx_cursor_min_alignment;
+ if (intel_scanout_needs_vtd_wa(display))
+ cursor->vtd_guard = 2;
+
cursor->update_arm = i9xx_cursor_update_arm;
cursor->disable_arm = i9xx_cursor_disable_arm;
cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
+ cursor->surf_offset = intel_cursor_surf_offset;
+
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ cursor->capture_error = g4x_cursor_capture_error;
+ else
+ cursor->capture_error = i9xx_cursor_capture_error;
+
cursor->cursor.base = ~0;
cursor->cursor.cntl = ~0;
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ if (display->platform.i845g || display->platform.i865g || HAS_CUR_FBC(display))
cursor->cursor.size = ~0;
- modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
+ modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_NONE);
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ ret = drm_universal_plane_init(display->drm, &cursor->base,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
@@ -1040,7 +1069,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (ret)
goto fail;
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
drm_plane_create_rotation_property(&cursor->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
@@ -1048,10 +1077,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
intel_cursor_add_size_hints_property(cursor);
- zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ zpos = DISPLAY_RUNTIME_INFO(display)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
drm_plane_enable_fb_damage_clips(&cursor->base);
intel_plane_helper_add(cursor);
@@ -1063,3 +1092,23 @@ fail:
return ERR_PTR(ret);
}
+
+void intel_cursor_mode_config_init(struct intel_display *display)
+{
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
+
+ if (display->platform.i845g) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i865g) {
+ mode_config->cursor_width = 512;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
+ } else {
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
+ }
+}
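
intel_cursor_mode_config_init() above hard-codes the per-platform cursor limits as an if/else ladder. The same mapping, pulled out as a self-contained lookup purely for illustration (platform names are simplified; the limits are the ones from the function above):

#include <stdio.h>

enum platform { I845G, I865G, I830, I85X, I915G, I915GM, OTHER };

struct cursor_limits { int width, height; };

static struct cursor_limits cursor_limits(enum platform p)
{
	switch (p) {
	case I845G:
		return (struct cursor_limits){ 64, 1023 };
	case I865G:
		return (struct cursor_limits){ 512, 1023 };
	case I830: case I85X: case I915G: case I915GM:
		return (struct cursor_limits){ 64, 64 };
	default:
		return (struct cursor_limits){ 256, 256 };
	}
}

int main(void)
{
	struct cursor_limits lim = cursor_limits(I865G);

	printf("max cursor: %dx%d\n", lim.width, lim.height);
	return 0;
}
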
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
index e2d9ec710a86..7c269d7381ad 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.h
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -7,14 +7,16 @@
#define _INTEL_CURSOR_H_
enum pipe;
-struct drm_i915_private;
+struct intel_display;
struct intel_plane;
struct kthread_work;
struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+intel_cursor_plane_create(struct intel_display *display,
enum pipe pipe);
void intel_cursor_unpin_work(struct kthread_work *base);
+void intel_cursor_mode_config_init(struct intel_display *display);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index cc734079c3b8..d98b4cf6b60e 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -5,22 +5,25 @@
#include <linux/log2.h>
#include <linux/math64.h>
-#include "i915_reg.h"
+
+#include <drm/drm_print.h>
+
+#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
-#define MB_WRITE_COMMITTED true
-#define MB_WRITE_UNCOMMITTED false
-
#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
for ((__lane) = 0; (__lane) < 2; (__lane)++) \
for_each_if((__lane_mask) & BIT(__lane))
@@ -31,13 +34,17 @@
bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- if (IS_PANTHERLAKE(i915) && phy == PHY_A)
- return true;
+ if (display->platform.pantherlake) {
+ if (display->platform.pantherlake_wildcatlake)
+ return phy <= PHY_B;
+ else
+ return phy == PHY_A;
+ }
- if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
+ if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
return false;
@@ -70,10 +77,9 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
static void
assert_dc_off(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool enabled;
- enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
drm_WARN_ON(display->drm, !enabled);
}
@@ -100,12 +106,12 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
*/
static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
{
- intel_wakeref_t wakeref;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
intel_psr_pause(intel_dp);
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
intel_cx0_program_msgbus_timer(encoder);
return wakeref;
@@ -113,15 +119,15 @@ static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *enc
static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_psr_resume(intel_dp);
- intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -130,7 +136,7 @@ static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -139,9 +145,9 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_RESET,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_RESET,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_err_once(display->drm,
"Failed to bring PHY %c to idle.\n",
phy_name(phy));
@@ -151,19 +157,17 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_clear_response_ready_flag(encoder, lane);
}
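
This and the following hunks swap intel_de_wait_for_clear()/intel_de_wait_custom() for the *_ms()/*_us() variants, making the timeout unit explicit at the call site. Underneath it is the usual poll-until-clear-with-deadline loop; a minimal userspace sketch of that idea, with the register read simulated and names that are not the i915 API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg = 0x8000;	/* bit 15 stands in for a "busy" flag */

static uint32_t read_reg(void)
{
	/* a real implementation would be an MMIO read; here the flag just clears */
	uint32_t val = fake_reg;

	fake_reg &= ~0x8000u;
	return val;
}

static bool wait_for_clear_ms(uint32_t mask, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(read_reg() & mask))
			return true;	/* bits cleared before the deadline */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return false;	/* timed out; the caller resets the bus */
	}
}

int main(void)
{
	printf("cleared: %d\n", wait_for_clear_ms(0x8000, 1));
	return 0;
}
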
-static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
- int command, int lane, u32 *val)
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- if (intel_de_wait_custom(display,
- XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ if (intel_de_wait_ms(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_MS, val)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -208,9 +212,9 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -263,8 +267,7 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder,
return 0;
}
-static u8 intel_cx0_read(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr)
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
@@ -280,9 +283,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -296,9 +299,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -351,8 +354,8 @@ static void __intel_cx0_write(struct intel_encoder *encoder,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 data, bool committed)
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
@@ -404,8 +407,8 @@ static void __intel_cx0_rmw(struct intel_encoder *encoder,
__intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
@@ -2001,19 +2004,6 @@ static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
NULL,
};
-static int intel_c10_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
-}
-
static const struct intel_c10pll_state * const *
intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2031,21 +2021,25 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
return NULL;
}
-static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static void intel_cx0pll_update_ssc(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state, bool is_dp)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
- int i;
- if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (is_dp) {
if (intel_panel_use_ssc(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
pll_state->ssc_enabled =
(intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
}
}
+}
+
+static void intel_c10pll_update_pll(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ int i;
if (pll_state->ssc_enabled)
return;
@@ -2055,29 +2049,58 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
pll_state->c10.pll[i] = 0;
}
+static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder,
+ const struct intel_c10pll_state * const *tables,
+ bool is_dp, int port_clock,
+ struct intel_cx0pll_state *pll_state)
+{
+ int i;
+
+ for (i = 0; tables[i]; i++) {
+ if (port_clock == tables[i]->clock) {
+ pll_state->c10 = *tables[i];
+ intel_cx0pll_update_ssc(encoder, pll_state, is_dp);
+ intel_c10pll_update_pll(encoder, pll_state);
+ pll_state->use_c10 = true;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
const struct intel_c10pll_state * const *tables;
- int i;
+ int err;
tables = intel_c10pll_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
- for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
- intel_c10pll_update_pll(crtc_state, encoder);
- crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
+ err = intel_c10pll_calc_state_from_table(encoder, tables,
+ intel_crtc_has_dp_encoder(crtc_state),
+ crtc_state->port_clock,
+ &crtc_state->dpll_hw_state.cx0pll);
- return 0;
- }
- }
+ if (err == 0 || !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return err;
- return -EINVAL;
+	/* For HDMI PLLs, try the SNPS PHY algorithm if there are no precomputed tables */
+ intel_snps_hdmi_pll_compute_c10pll(&crtc_state->dpll_hw_state.cx0pll.c10,
+ crtc_state->port_clock);
+ intel_c10pll_update_pll(encoder,
+ &crtc_state->dpll_hw_state.cx0pll);
+ crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
+
+ return 0;
}
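
intel_c10pll_calc_state() now tries the precomputed tables first and, for HDMI clocks with no table entry, falls back to computing the PLL state algorithmically. A self-contained sketch of that "table first, compute otherwise" shape; the table values and the fallback formula are made up for illustration:

#include <stdio.h>

struct pll_state { int clock; int divider; };

static const struct pll_state table[] = {
	{ 162000, 10 },
	{ 270000, 6 },
	{ 540000, 3 },
	{ 0, 0 }			/* sentinel */
};

static int calc_from_table(int clock, struct pll_state *out)
{
	int i;

	for (i = 0; table[i].clock; i++) {
		if (table[i].clock == clock) {
			*out = table[i];
			return 0;
		}
	}
	return -1;			/* no precomputed entry */
}

static int calc_state(int clock, int is_hdmi, struct pll_state *out)
{
	int err = calc_from_table(clock, out);

	if (err == 0 || !is_hdmi)
		return err;

	/* HDMI fallback: derive the state instead of looking it up */
	out->clock = clock;
	out->divider = 1620000 / clock;	/* placeholder formula */
	return 0;
}

int main(void)
{
	struct pll_state state;

	calc_state(594000, 1, &state);	/* not in the table -> computed */
	printf("clock=%d divider=%d\n", state.clock, state.divider);
	return 0;
}
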
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state);
+
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
@@ -2102,27 +2125,20 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
+
+ pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
}
static void intel_c10_pll_program(struct intel_display *display,
- const struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
{
- const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10;
int i;
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
- /* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
- C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
- MB_WRITE_COMMITTED);
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
- 0, C10_VDR_CTRL_UPDATE_CFG,
- MB_WRITE_COMMITTED);
-
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2132,6 +2148,10 @@ static void intel_c10_pll_program(struct intel_display *display,
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ /* Custom width needs to be programmed to 0 for both the phy lanes */
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+ MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
@@ -2175,9 +2195,47 @@ static void intel_c10pll_dump_hw_state(struct intel_display *display,
i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
}
-static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+/*
+ * Some ARL SoCs have the same DRM PCI IDs, so we need a helper to differentiate
+ * based on the host bridge device ID to get the correct tx_misc value.
+ */
+static bool is_arrowlake_s_by_host_bridge(void)
+{
+ struct pci_dev *pdev = NULL;
+ u16 host_bridge_pci_dev_id;
+
+ while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, pdev)))
+ host_bridge_pci_dev_id = pdev->device;
+
+ return pdev && IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id);
+}
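
is_arrowlake_s_by_host_bridge() walks every host bridge the PCI core knows about, keeps the device ID of the last one it sees, and matches it against the ARL-S list. A self-contained sketch of the same walk-and-match logic; the device IDs below are made up, and the real check is IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical ARL-S host bridge device IDs, purely for illustration */
static bool is_arl_s_host_bridge_id(uint16_t devid)
{
	return devid == 0x7d10 || devid == 0x7d11;
}

int main(void)
{
	/* stands in for the pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, ...) walk */
	const uint16_t host_bridges[] = { 0x1234, 0x7d10 };
	uint16_t last_id = 0;
	size_t i;

	for (i = 0; i < sizeof(host_bridges) / sizeof(host_bridges[0]); i++)
		last_id = host_bridges[i];

	printf("ARL-S host bridge: %s\n",
	       is_arl_s_host_bridge_id(last_id) ? "yes" : "no");
	return 0;
}
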
+
+static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ u16 tx_misc;
+ u16 tx_dcc_cal_dac_ctrl_range = 8;
+ u16 tx_term_ctrl = 2;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tx_misc = 5;
+ tx_term_ctrl = 4;
+ } else if (display->platform.battlemage) {
+ tx_misc = 0;
+ } else if (display->platform.meteorlake_u ||
+ is_arrowlake_s_by_host_bridge()) {
+ tx_misc = 3;
+ } else {
+ tx_misc = 7;
+ }
+
+ return (C20_PHY_TX_MISC(tx_misc) |
+ C20_PHY_TX_DCC_CAL_RANGE(tx_dcc_cal_dac_ctrl_range) |
+ C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl));
+}
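
intel_c20_hdmi_tmds_tx_cgf_1() replaces the old hard-coded 0x9800 | C20_PHY_TX_MISC(...) value by composing the whole TX word from field macros; the layout matches the C20_PHY_TX_* masks added to intel_cx0_phy_regs.h further down in this patch. A self-contained sketch of the FIELD_PREP-style composition, using plain-C stand-ins for the REG_* helpers:

#include <stdint.h>
#include <stdio.h>

/* minimal GENMASK/FIELD_PREP in plain C, 16-bit flavour */
#define GENMASK16(h, l)		((0xffffu << (l)) & (0xffffu >> (15 - (h))))
#define FIELD_PREP16(mask, val)	(((uint16_t)(val) << __builtin_ctz(mask)) & (mask))

#define TX_MISC_MASK		GENMASK16(7, 0)
#define TX_DCC_CAL_RANGE_MASK	GENMASK16(11, 8)
#define TX_DCC_BYPASS		(1u << 12)
#define TX_TERM_CTL_MASK	GENMASK16(15, 13)

static uint16_t tx_cfg(unsigned int misc, unsigned int dcc_range, unsigned int term)
{
	return FIELD_PREP16(TX_MISC_MASK, misc) |
	       FIELD_PREP16(TX_DCC_CAL_RANGE_MASK, dcc_range) |
	       TX_DCC_BYPASS |
	       FIELD_PREP16(TX_TERM_CTL_MASK, term);
}

int main(void)
{
	/* the DISPLAY_VER >= 20 values from the function above: misc=5, range=8, term=4 */
	printf("tx[1] = %#06x\n", (unsigned int)tx_cfg(5, 8, 4));
	return 0;
}
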
+
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+{
struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
u64 datarate;
u64 mpll_tx_clk_div;
@@ -2187,7 +2245,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
u64 mpll_multiplier;
u64 mpll_fracn_quot;
u64 mpll_fracn_rem;
- u16 tx_misc;
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
@@ -2207,11 +2264,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)),
datarate), 255);
- if (DISPLAY_VER(display) >= 20)
- tx_misc = 0x5;
- else
- tx_misc = 0x0;
-
if (vco_freq <= DATARATE_3000000000)
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3;
else if (vco_freq <= DATARATE_3500000000)
@@ -2223,7 +2275,7 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc);
+ pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
@@ -2251,31 +2303,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_c20_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- if (clock >= 25175 && clock <= 594000)
- return MODE_OK;
-
- return MODE_CLOCK_RANGE;
-}
-
-int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
-{
- struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
-
- if (intel_encoder_is_c10phy(&dig_port->base))
- return intel_c10_phy_check_hdmi_link_rate(clock);
- return intel_c20_phy_check_hdmi_link_rate(clock);
-}
-
static const struct intel_c20pll_state * const *
intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2324,6 +2351,9 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
+ intel_cx0pll_update_ssc(encoder,
+ &crtc_state->dpll_hw_state.cx0pll,
+ intel_crtc_has_dp_encoder(crtc_state));
crtc_state->dpll_hw_state.cx0pll.use_c10 = false;
return 0;
}
@@ -2555,20 +2585,6 @@ static bool is_dp2(u32 clock)
return false;
}
-static bool is_hdmi_frl(u32 clock)
-{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
-}
-
static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -2582,28 +2598,25 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
{
if (dp && is_dp2(clock))
return 2;
- else if (is_hdmi_frl(clock))
+ else if (intel_hdmi_is_frl(clock))
return 1;
else
return 0;
}
static void intel_c20_pll_program(struct intel_display *display,
- const struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state,
+ bool is_dp, int port_clock)
{
- const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
- bool dp = false;
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- u32 clock = crtc_state->port_clock;
+ u8 serdes;
bool cntx;
int i;
- if (intel_crtc_has_dp_encoder(crtc_state))
- dp = true;
-
/* 1. Read current context selection */
- cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) &
+ PHY_C20_CONTEXT_TOGGLE;
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2669,32 +2682,35 @@ static void intel_c20_pll_program(struct intel_display *display,
/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
- PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(port_clock, is_dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
- if (dp) {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
- MB_WRITE_COMMITTED);
- } else {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(clock) ? BIT(7) : 0,
- MB_WRITE_COMMITTED);
+ serdes = 0;
+ if (is_dp)
+ serdes = PHY_C20_IS_DP |
+ PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock));
+ else if (intel_hdmi_is_frl(port_clock))
+ serdes = PHY_C20_IS_HDMI_FRL;
- intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(clock),
- MB_WRITE_COMMITTED);
- }
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL,
+ serdes,
+ MB_WRITE_COMMITTED);
+
+ if (!is_dp)
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ PHY_C20_HDMI_RATE_MASK,
+ intel_c20_get_hdmi_rate(port_clock),
+ MB_WRITE_COMMITTED);
/*
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
+ PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE,
+ MB_WRITE_COMMITTED);
}
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
@@ -2725,7 +2741,8 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
}
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
+ const struct intel_cx0pll_state *pll_state,
+ bool is_dp, int port_clock,
bool lane_reversal)
{
struct intel_display *display = to_intel_display(encoder);
@@ -2740,22 +2757,21 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- is_hdmi_frl(crtc_state->port_clock))
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ if (!is_dp && intel_hdmi_is_frl(port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
- if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
- val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ if (port_clock == 1000000 || port_clock == 2000000)
+ val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
- val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+ val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
}
@@ -2781,8 +2797,8 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
- u8 lane_mask, u8 state)
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2796,9 +2812,9 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
/* Wait for pending transactions.*/
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
@@ -2810,26 +2826,26 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (intel_de_wait_custom(display, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_clear_ms(display, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask),
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
-static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
XELPDP_POWER_STATE_READY_MASK,
- XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+ XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
- XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+ XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) |
XELPDP_PLL_LANE_STAGGERING_DELAY(0));
}
@@ -2871,48 +2887,47 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of SOC reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+ "PHY %c failed to bring out of SOC reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
lane_pipe_reset);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, NULL))
drm_warn(display->drm,
- "PHY %c failed to request refclk after %dus.\n",
- phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
+ "PHY %c failed to request refclk\n",
+ phy_name(phy));
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_RESET);
+ XELPDP_P2_STATE_RESET);
intel_cx0_setup_powerdown(encoder);
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status,
- XELPDP_PORT_RESET_END_TIMEOUT))
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_END_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dms.\n",
- phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count,
@@ -2981,8 +2996,9 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
return val;
}
-static void intel_cx0pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void __intel_cx0pll_enable(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state,
+ bool is_dp, int port_clock, int lane_count)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -2996,7 +3012,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+ intel_program_port_clock_ctl(encoder, pll_state, is_dp, port_clock, lane_reversal);
/* 2. Bring PHY out of reset. */
intel_cx0_phy_lane_reset(encoder, lane_reversal);
@@ -3006,7 +3022,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* TODO: For DP alt mode use only one lane.
*/
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_READY);
+ XELPDP_P2_STATE_READY);
/*
* 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
@@ -3016,15 +3032,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
/* 5. Program PHY internal PLL internal registers. */
if (intel_encoder_is_c10phy(encoder))
- intel_c10_pll_program(display, crtc_state, encoder);
+ intel_c10_pll_program(display, encoder, &pll_state->c10);
else
- intel_c20_pll_program(display, crtc_state, encoder);
+ intel_c20_pll_program(display, encoder, &pll_state->c20, is_dp, port_clock);
/*
* 6. Program the enabled and disabled owned PHY lane
* transmitters over message bus
*/
- intel_cx0_program_phy_lane(encoder, crtc_state->lane_count, lane_reversal);
+ intel_cx0_program_phy_lane(encoder, lane_count, lane_reversal);
/*
* 7. Follow the Display Voltage Frequency Switching - Sequence
@@ -3035,8 +3051,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 8. Program DDI_CLK_VALFREQ to match intended DDI
* clock frequency.
*/
- intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
- crtc_state->port_clock);
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock);
/*
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
@@ -3047,12 +3062,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm, "Port %c PLL not locked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, NULL))
+ drm_warn(display->drm, "Port %c PLL not locked\n",
+ phy_name(phy));
/*
* 11. Follow the Display Voltage Frequency Switching Sequence After
@@ -3063,6 +3078,14 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
}
+static void intel_cx0pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ __intel_cx0pll_enable(encoder, &crtc_state->dpll_hw_state.cx0pll,
+ intel_crtc_has_dp_encoder(crtc_state),
+ crtc_state->port_clock, crtc_state->lane_count);
+}
+
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@@ -3070,7 +3093,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3085,13 +3108,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
return 540000;
case XELPDP_DDI_CLOCK_SELECT_TBT_810:
return 810000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_312_5:
+ return 1000000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_625:
+ return 2000000;
default:
MISSING_CASE(clock);
return 162000;
}
}
-static int intel_mtl_tbt_clock_select(int clock)
+static int intel_mtl_tbt_clock_select(struct intel_display *display,
+ int clock)
{
switch (clock) {
case 162000:
@@ -3102,27 +3130,46 @@ static int intel_mtl_tbt_clock_select(int clock)
return XELPDP_DDI_CLOCK_SELECT_TBT_540;
case 810000:
return XELPDP_DDI_CLOCK_SELECT_TBT_810;
+ case 1000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN_ON(display->drm, "UHBR10 not supported for the platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_312_5;
+ case 2000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN_ON(display->drm, "UHBR20 not supported for the platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_625;
default:
MISSING_CASE(clock);
return XELPDP_DDI_CLOCK_SELECT_TBT_162;
}
}
-static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
+ u32 mask;
/*
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock));
+
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK(display);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display,
+ intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+
+ mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
+ mask, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
@@ -3139,12 +3186,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 100))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3171,19 +3215,58 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
intel_cx0pll_enable(encoder, crtc_state);
}
+/*
+ * According to the HAS we need to enable MAC Transmitting LFPS in the "PHY Common
+ * Control 0" PIPE register in case AUX-less ALPM is going to be used. This
+ * function does that and is called by the link retrain sequence.
+ */
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ intel_wakeref_t wakeref;
+ int i;
+ u8 owned_lane_mask;
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
+ for (i = 0; i < 4; i++) {
+ int tx = i % 2 + 1;
+ u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
+
+ if (!(owned_lane_mask & lane_mask))
+ continue;
+
+ intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
+ CONTROL0_MAC_TRANSMIT_LFPS,
+ CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
+ }
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
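
The loop above visits both transmitters of both PHY lanes; the index arithmetic maps i = 0..3 onto (lane, tx) pairs. A tiny self-contained sketch that just prints that mapping:

#include <stdio.h>

int main(void)
{
	int i;

	/* same indexing as the loop above: 2 lanes x 2 transmitters */
	for (i = 0; i < 4; i++) {
		int tx = i % 2 + 1;		/* TX1 or TX2 within a lane */
		int lane = i < 2 ? 0 : 1;	/* lane 0 first, then lane 1 */

		printf("i=%d -> lane %d, tx %d\n", i, lane, tx);
	}
	return 0;
}
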
+
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_encoder_is_c10phy(encoder))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
- if ((IS_BATTLEMAGE(i915) && encoder->port == PORT_A) ||
+ if ((display->platform.battlemage && encoder->port == PORT_A) ||
(DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
- return CX0_P4PG_STATE_DISABLE;
+ return XELPDP_P4PG_STATE_DISABLE;
}
static void intel_cx0pll_disable(struct intel_encoder *encoder)
@@ -3215,13 +3298,12 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm,
- "Port %c PLL not unlocked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES),
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US))
+ drm_warn(display->drm, "Port %c PLL not unlocked\n",
+ phy_name(phy));
/*
* 6. Follow the Display Voltage Frequency Switching Sequence After
@@ -3230,14 +3312,24 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ XELPDP_DDI_CLOCK_SELECT_MASK(display), 0);
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
intel_cx0_phy_transaction_end(encoder, wakeref);
}
-static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
+static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u8 lane = dig_port->lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
+
+ return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) &
+ intel_cx0_get_pclk_pll_request(lane);
+}
+
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3254,10 +3346,9 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 10))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3269,7 +3360,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
@@ -3298,7 +3389,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* handling is done via the standard shared DPLL framework.
*/
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
@@ -3476,7 +3567,7 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- if (DISPLAY_VER(display) < 14)
+ if (!IS_DISPLAY_VER(display, 14, 30))
return;
if (!new_crtc_state->hw.active)
@@ -3498,3 +3589,54 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
else
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
}
+
+/*
+ * WA 14022081154
+ * The dedicated display PHYs reset to a power state that blocks S0ix, increasing idle
+ * system power. After a system reset (cold boot, S3/4/5, warm reset) if a dedicated
+ * PHY is not being brought up shortly, use these steps to move the PHY to the lowest
+ * power state to save power. For PTL the workaround is needed only for port A. Port B
+ * is not connected.
+ *
+ * 1. Follow the PLL Enable Sequence, using any valid frequency such as DP 1.62 GHz.
+ * This brings lanes out of reset and enables the PLL to allow powerdown to be moved
+ * to the Disable state.
+ * 2. Follow PLL Disable Sequence. This moves powerdown to the Disable state and disables the PLL.
+ */
+void intel_cx0_pll_power_save_wa(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 30)
+ return;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ struct intel_cx0pll_state pll_state = {};
+ int port_clock = 162000;
+
+ if (!intel_encoder_is_dig_port(encoder))
+ continue;
+
+ if (!intel_encoder_is_c10phy(encoder))
+ continue;
+
+ if (intel_cx0_pll_is_enabled(encoder))
+ continue;
+
+ if (intel_c10pll_calc_state_from_table(encoder,
+ mtl_c10_edp_tables,
+ true, port_clock,
+ &pll_state) < 0) {
+ drm_WARN_ON(display->drm,
+ "Unable to calc C10 state from the tables\n");
+ continue;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Applying power saving workaround on disabled PLL\n",
+ encoder->base.base.id, encoder->base.name);
+
+ __intel_cx0pll_enable(encoder, &pll_state, true, port_clock, 4);
+ intel_cx0pll_disable(encoder);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 711168882684..84d334b865f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define MB_WRITE_COMMITTED true
+#define MB_WRITE_UNCOMMITTED false
+
enum icl_port_dpll_id;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -19,6 +22,8 @@ struct intel_display;
struct intel_encoder;
struct intel_hdmi;
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
@@ -41,7 +46,25 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state);
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder);
+bool intel_cx0_is_hdmi_frl(u32 clock);
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr);
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed);
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed);
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val);
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
+void intel_cx0_pll_power_save_wa(struct intel_display *display);
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index c685479c9756..8df5cd5ce418 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -6,8 +6,13 @@
#ifndef __INTEL_CX0_PHY_REGS_H__
#define __INTEL_CX0_PHY_REGS_H__
-#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+#include "intel_display_reg_defs.h"
+
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A 0x64030
+#define _DDI_CLK_VALFREQ_B 0x64130
+#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
/*
* Wrapper macro to convert from port number to the index used in some of the
@@ -45,6 +50,7 @@
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
#define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define XELPDP_PORT_P2P_TRANSACTION_PENDING REG_BIT(24)
#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16)
#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
@@ -68,14 +74,13 @@
#define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
#define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15)
-#define XELPDP_MSGBUS_TIMEOUT_SLOW 1
-#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2
+#define XELPDP_MSGBUS_TIMEOUT_MS 1
#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
-#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100
-#define XELPDP_PORT_RESET_END_TIMEOUT 15
+#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2
+#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15
#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
@@ -99,13 +104,16 @@
#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1)
#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2)
#define XELPDP_PORT_REVERSAL REG_BIT(16)
+#define XE3PLPDP_PHY_MODE_MASK REG_GENMASK(15, 12)
+#define XE3PLPDP_PHY_MODE_DP REG_FIELD_PREP(XE3PLPDP_PHY_MODE_MASK, 0x3)
#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11)
#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7)
#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6)
#define XELPDP_TCSS_POWER_REQUEST REG_BIT(5)
#define XELPDP_TCSS_POWER_STATE REG_BIT(4)
#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
+#define XELPDP_PORT_WIDTH(width) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
#define _XELPDP_PORT_BUF_CTL2(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
@@ -118,6 +126,7 @@
_XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
+#define XE3PLPDP_LANE_PHY_PULSE_STATUS(lane) _PICK(lane, REG_BIT(27), REG_BIT(26))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20)
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
@@ -143,11 +152,12 @@
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
-#define CX0_P0_STATE_ACTIVE 0x0
-#define CX0_P2_STATE_READY 0x2
-#define CX0_P2PG_STATE_DISABLE 0x9
-#define CX0_P4PG_STATE_DISABLE 0xC
-#define CX0_P2_STATE_RESET 0x2
+#define XELPDP_P0_STATE_ACTIVE 0x0
+#define XELPDP_P2_STATE_READY 0x2
+#define XE3PLPD_P4_STATE_DISABLE 0x4
+#define XELPDP_P2PG_STATE_DISABLE 0x9
+#define XELPDP_P4PG_STATE_DISABLE 0xC
+#define XELPDP_P2_STATE_RESET 0x2
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_A 0x640d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
@@ -186,8 +196,17 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
-#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
-#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define _XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define _XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
+#define XELPDP_DDI_CLOCK_SELECT_MASK(display) (DISPLAY_VER(display) >= 30 ? \
+ _XE3_DDI_CLOCK_SELECT_MASK : _XELPDP_DDI_CLOCK_SELECT_MASK)
+#define XELPDP_DDI_CLOCK_SELECT_PREP(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_PREP(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_PREP(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+#define XELPDP_DDI_CLOCK_SELECT_GET(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_GET(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_GET(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
@@ -195,6 +214,8 @@
#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd
#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe
#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf
+#define XELPDP_DDI_CLOCK_SELECT_TBT_312_5 0x18
+#define XELPDP_DDI_CLOCK_SELECT_TBT_625 0x19
#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10)
#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8)
#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1)
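
The XELPDP_DDI_CLOCK_SELECT_MASK/_PREP/_GET macros above pick between the old 4-bit field (15:12) and the wider Xe3 field (16:12) based on the display version, so callers go through one helper instead of open-coding REG_FIELD_PREP/GET with two masks. A self-contained sketch of that runtime-selected-mask pattern, using plain-C stand-ins for the REG_* helpers:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define OLD_CLOCK_SELECT_MASK	GENMASK32(15, 12)	/* 4-bit field */
#define NEW_CLOCK_SELECT_MASK	GENMASK32(16, 12)	/* 5-bit field, Xe3-style */

static uint32_t clock_select_mask(int display_ver)
{
	return display_ver >= 30 ? NEW_CLOCK_SELECT_MASK : OLD_CLOCK_SELECT_MASK;
}

int main(void)
{
	int ver = 30;
	uint32_t reg = FIELD_PREP32(clock_select_mask(ver), 0x18);	/* TBT 312.5 */

	printf("reg=%#x select=%#x\n", reg,
	       FIELD_GET32(clock_select_mask(ver), reg));
	return 0;
}
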
@@ -209,10 +230,34 @@
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
+#define C10_PLL0_SSC_EN REG_BIT8(0)
+#define C10_PLL0_DIVCLK_EN REG_BIT8(1)
+#define C10_PLL0_DIV5CLK_EN REG_BIT8(2)
+#define C10_PLL0_WORDDIV2_EN REG_BIT8(3)
#define C10_PLL0_FRACEN REG_BIT8(4)
+#define C10_PLL0_PMIX_EN REG_BIT8(5)
+#define C10_PLL0_ANA_FREQ_VCO_MASK REG_GENMASK8(7, 6)
+#define C10_PLL1_DIV_MULTIPLIER_MASK REG_GENMASK8(7, 0)
+#define C10_PLL2_MULTIPLIERL_MASK REG_GENMASK8(7, 0)
#define C10_PLL3_MULTIPLIERH_MASK REG_GENMASK8(3, 0)
+#define C10_PLL8_SSC_UP_SPREAD REG_BIT8(5)
+#define C10_PLL9_FRACN_DENL_MASK REG_GENMASK8(7, 0)
+#define C10_PLL10_FRACN_DENH_MASK REG_GENMASK8(7, 0)
+#define C10_PLL11_FRACN_QUOT_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL12_FRACN_QUOT_H_MASK REG_GENMASK8(7, 0)
+#define C10_PLL13_FRACN_REM_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL14_FRACN_REM_H_MASK REG_GENMASK8(7, 0)
#define C10_PLL15_TXCLKDIV_MASK REG_GENMASK8(2, 0)
#define C10_PLL15_HDMIDIV_MASK REG_GENMASK8(5, 3)
+#define C10_PLL15_PIXELCLKDIV_MASK REG_GENMASK8(7, 6)
+#define C10_PLL16_ANA_CPINT REG_GENMASK8(6, 0)
+#define C10_PLL16_ANA_CPINTGS_L REG_BIT8(7)
+#define C10_PLL17_ANA_CPINTGS_H_MASK REG_GENMASK8(5, 0)
+#define C10_PLL17_ANA_CPPROP_L_MASK REG_GENMASK8(7, 6)
+#define C10_PLL18_ANA_CPPROP_H_MASK REG_GENMASK8(4, 0)
+#define C10_PLL18_ANA_CPPROPGS_L_MASK REG_GENMASK8(7, 5)
+#define C10_PLL19_ANA_CPPROPGS_H_MASK REG_GENMASK8(3, 0)
+#define C10_PLL19_ANA_V2I_MASK REG_GENMASK8(5, 4)
#define PHY_C10_VDR_CMN(idx) (0xC20 + (idx))
#define C10_CMN0_REF_RANGE REG_FIELD_PREP(REG_GENMASK(4, 0), 1)
@@ -244,6 +289,9 @@
#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6)
+#define PHY_CMN1_CONTROL(tx, control) (0x800 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL0_MAC_TRANSMIT_LFPS REG_BIT(1)
+
/* C20 Registers */
#define PHY_C20_WR_ADDRESS_L 0xC02
#define PHY_C20_WR_ADDRESS_H 0xC03
@@ -254,10 +302,14 @@
#define PHY_C20_RD_DATA_L 0xC08
#define PHY_C20_RD_DATA_H 0xC09
#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00
-#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_IS_HDMI_FRL REG_BIT8(7)
+#define PHY_C20_IS_DP REG_BIT8(6)
+#define PHY_C20_DP_RATE_MASK REG_GENMASK8(4, 1)
+#define PHY_C20_DP_RATE(val) REG_FIELD_PREP8(PHY_C20_DP_RATE_MASK, val)
#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0)
-#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1)
-#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val)
+#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_HDMI_RATE_MASK REG_GENMASK8(1, 0)
+#define PHY_C20_HDMI_RATE(val) REG_FIELD_PREP8(PHY_C20_HDMI_RATE_MASK, val)
#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02
#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val)
@@ -289,6 +341,12 @@
#define C20_PHY_TX_RATE REG_GENMASK(2, 0)
#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0)
#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val))
+#define C20_PHY_TX_DCC_CAL_RANGE_MASK REG_GENMASK16(11, 8)
+#define C20_PHY_TX_DCC_CAL_RANGE(val) \
+ REG_FIELD_PREP16(C20_PHY_TX_DCC_CAL_RANGE_MASK, (val))
+#define C20_PHY_TX_DCC_BYPASS REG_BIT(12)
+#define C20_PHY_TX_TERM_CTL_MASK REG_GENMASK16(15, 13)
+#define C20_PHY_TX_TERM_CTL(val) REG_FIELD_PREP16(C20_PHY_TX_TERM_CTL_MASK, (val))
#define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx))
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.c b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
new file mode 100644
index 000000000000..8b8894c37f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_dbuf_bw.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_dbuf_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+};
+
+struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_dbuf_bw_state, base);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj);
+ if (IS_ERR(dbuf_bw_state))
+ return ERR_CAST(dbuf_bw_state);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dbuf_bw_state_changed(struct intel_display *display,
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state,
+ const struct intel_dbuf_bw_state *new_dbuf_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
+ &old_dbuf_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_dbuf_bw =
+ &new_dbuf_bw_state->dbuf_bw[pipe];
+
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
+
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
+ }
+}
+
+/* "Maximum Data Buffer Bandwidth" */
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe];
+
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
+ }
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
+ }
+
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
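To make the bandwidth-to-cdclk division above concrete, a standalone sketch of the same arithmetic; the plane data rate, the three-plane scenario, and the reading of the divisor as 64 bytes fetched per cdclk cycle are illustrative assumptions, not values taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/*
	 * One dbuf slice shared by 3 active planes, worst per-plane data
	 * rate 2,073,600 (in the driver's data-rate units). The arbiter
	 * only guarantees an equal share, so size for 3x the worst plane.
	 */
	unsigned int max_bw = 3 * 2073600;

	printf("min cdclk: %u\n", DIV_ROUND_UP(max_bw, 64));	/* 97200 */
	return 0;
}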
+
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL;
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
+ new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
+ if (IS_ERR(new_dbuf_bw_state))
+ return PTR_ERR(new_dbuf_bw_state);
+
+ old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state);
+
+ new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
+ }
+
+ if (!old_dbuf_bw_state)
+ return 0;
+
+ if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_cdclk_update_dbuf_bw_min_cdclk(state,
+ intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state),
+ intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state),
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void intel_dbuf_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state);
+ }
+}
+
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe]));
+}
+
+static struct intel_global_state *
+intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ return &state->base;
+}
+
+static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_bw_funcs = {
+ .atomic_duplicate_state = intel_dbuf_bw_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_bw_destroy_state,
+};
+
+int intel_dbuf_bw_init(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(display, &display->dbuf_bw.obj,
+ &state->base, &intel_dbuf_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.h b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
new file mode 100644
index 000000000000..61875b9d5969
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_DBUF_BW_H__
+#define __INTEL_DBUF_BW_H__
+
+#include <drm/drm_atomic.h>
+
+struct intel_atomic_state;
+struct intel_dbuf_bw_state;
+struct intel_crtc;
+struct intel_display;
+struct intel_global_state;
+
+struct intel_dbuf_bw_state *
+to_intel_dbuf_bw_state(struct intel_global_state *obj_state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state);
+
+int intel_dbuf_bw_init(struct intel_display *display);
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state);
+void intel_dbuf_bw_update_hw_state(struct intel_display *display);
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DBUF_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 4f9c50996446..002ccd47856d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -26,14 +26,17 @@
*/
#include <linux/iopoll.h>
+#include <linux/seq_buf.h>
#include <linux/string_helpers.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "icl_dsi.h"
+#include "intel_alpm.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -47,7 +50,9 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -67,15 +72,20 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lspcon.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_panel.h"
+#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -104,14 +114,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -122,25 +132,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -153,7 +163,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -161,88 +171,77 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
-static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
+static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- int ret;
-
- /* FIXME: find out why Bspec's 100us timeout is too short */
- ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
- XELPDP_PORT_BUF_PHY_IDLE), 10000);
- if (ret)
- drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
- port_name(port));
+ if (DISPLAY_VER(display) >= 14)
+ return XELPDP_PORT_BUF_CTL1(display, port);
+ else
+ return DDI_BUF_CTL(port);
}
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port)
{
- if (IS_BROXTON(dev_priv)) {
+ /*
+ * Bspec's platform specific timeouts:
+ * MTL+ : 100 us
+ * BXT : fixed 16 us
+ * HSW-ADL: 8 us
+ *
+ * FIXME: MTL requires 10 ms based on tests; find out why 100 us is too short
+ */
+ if (display->platform.broxton) {
udelay(16);
return;
}
- if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE), 8))
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
+ if (intel_de_wait_for_set_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
+ drm_err(display->drm, "Timeout waiting for DDI BUF %c to get idle\n",
port_name(port));
}
static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- int timeout_us;
- int ret;
- /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
- if (DISPLAY_VER(dev_priv) < 10) {
+ /*
+ * Bspec's platform specific timeouts:
+ * MTL+ : 10000 us
+ * DG2 : 1200 us
+ * TGL-ADL combo PHY: 1000 us
+ * TGL-ADL TypeC PHY: 3000 us
+ * HSW-ICL : fixed 518 us
+ */
+ if (DISPLAY_VER(display) < 10) {
usleep_range(518, 1000);
return;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
- timeout_us = 10000;
- } else if (IS_DG2(dev_priv)) {
- timeout_us = 1200;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_encoder_is_tc(encoder))
- timeout_us = 3000;
- else
- timeout_us = 1000;
- } else {
- timeout_us = 500;
- }
-
- if (DISPLAY_VER(dev_priv) >= 14)
- ret = _wait_for(!(intel_de_read(dev_priv,
- XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
- XELPDP_PORT_BUF_PHY_IDLE),
- timeout_us, 10, 10);
- else
- ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
- timeout_us, 10, 10);
-
- if (ret)
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+ static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
+ if (intel_de_wait_for_clear_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
+ drm_err(display->drm, "Timeout waiting for DDI BUF %c to get active\n",
port_name(port));
}
-static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -266,7 +265,7 @@ static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
@@ -327,10 +326,32 @@ static u32 ddi_buf_phy_link_rate(int port_clock)
}
}
+static int dp_phy_lane_stagger_delay(int port_clock)
+{
+ /*
+ * Return the number of symbol clocks delay used to stagger the
+ * assertion/deassertion of the port lane enables. The target delay
+ * time is 100 ns or greater; return the number of symbols specific to
+ * the provided port_clock (aka link clock) corresponding to this delay
+ * time, i.e. so that
+ *
+ * number_of_symbols * duration_of_one_symbol >= 100 ns
+ *
+ * The delay must be applied only on TypeC DP outputs; for everything else
+ * the delay must be set to 0.
+ *
+ * Return the number of link symbols per 100 ns:
+ * port_clock (10 kHz) -> bits / 100 us
+ * / symbol_size -> symbols / 100 us
+ * / 1000 -> symbols / 100 ns
+ */
+ return DIV_ROUND_UP(port_clock, intel_dp_link_symbol_size(port_clock) * 1000);
+}
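As a quick sanity check of the symbols-per-100 ns derivation above (a standalone sketch, not part of the patch; the HBR figures and the local DIV_ROUND_UP are assumptions for illustration): an 8b/10b HBR link uses 10-bit symbols at 270 MSym/s, so 100 ns corresponds to 27 symbols, matching the formula.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int port_clock = 270000;	/* HBR, as the driver expresses the link clock */
	int symbol_size = 10;		/* 8b/10b link symbol size in bits */

	/* 270 MSym/s * 100 ns = 27 symbols of stagger delay */
	printf("%d\n", DIV_ROUND_UP(port_clock, symbol_size * 1000));
	return 0;
}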
+
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -343,24 +364,29 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
+
+ if (IS_DISPLAY_VER(display, 11, 13) && intel_encoder_is_tc(encoder)) {
+ int delay = dp_phy_lane_stagger_delay(crtc_state->port_clock);
+
+ intel_dp->DP |= DDI_BUF_LANE_STAGGER_DELAY(delay);
+ }
}
-static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
- u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
switch (val) {
case DDI_CLK_SEL_NONE:
@@ -392,15 +418,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -423,7 +448,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -446,7 +471,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -485,8 +510,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -494,7 +519,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -556,24 +581,26 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
if (intel_dp_is_uhbr(crtc_state))
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
else
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
- master == INVALID_TRANSCODER);
+ if (drm_WARN_ON(display->drm,
+ master == INVALID_TRANSCODER))
+ master = TRANSCODER_A;
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -581,7 +608,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -596,11 +623,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -612,12 +638,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -631,8 +657,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -640,7 +665,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -654,27 +679,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -683,7 +707,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -701,26 +725,24 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(intel_encoder);
intel_wakeref_t wakeref;
int ret = 0;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
}
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -730,7 +752,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
u32 ddi_mode;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
@@ -741,12 +763,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -771,7 +793,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -780,28 +802,27 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
- u8 mst_pipe_mask;
+ u8 mst_pipe_mask = 0, dp128b132b_pipe_mask = 0;
*pipe_mask = 0;
*is_dp_mst = false;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -822,18 +843,17 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- mst_pipe_mask = 0;
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ trans_wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -841,9 +861,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
- intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
+ intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
if ((tmp & port_mask) != ddi_select)
@@ -851,20 +871,39 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_mode = tmp & TRANS_DDI_MODE_SELECT_MASK;
- if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST ||
- (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)))
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)
mst_pipe_mask |= BIT(p);
+ else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display))
+ dp128b132b_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
+ if (!mst_pipe_mask && dp128b132b_pipe_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /*
+ * If we don't have 8b/10b MST, but have more than one
+ * transcoder in 128b/132b mode, we know it must be 128b/132b
+ * MST.
+ *
+ * Otherwise, we fall back to checking the current MST
+ * state. It's not accurate for hardware takeover at probe, but
+ * we don't expect MST to have been enabled at that point, and
+ * can assume it's SST.
+ */
+ if (hweight8(dp128b132b_pipe_mask) > 1 ||
+ intel_dp_mst_active_streams(intel_dp))
+ mst_pipe_mask = dp128b132b_pipe_mask;
+ }
+
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
@@ -872,25 +911,25 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
- "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ drm_dbg_kms(display->drm,
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ *pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -913,7 +952,7 @@ static enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
/*
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -929,8 +968,8 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
* extra wells.
*/
if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state))
- return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
- else if (DISPLAY_VER(i915) < 14 &&
+ return intel_display_power_aux_io_domain(display, dig_port->aux_ch);
+ else if (DISPLAY_VER(display) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
intel_encoder_is_tc(&dig_port->base)))
return intel_aux_power_domain(dig_port);
@@ -942,23 +981,23 @@ static void
main_link_aux_power_domain_get(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum intel_display_power_domain domain =
intel_ddi_main_link_aux_domain(dig_port, crtc_state);
- drm_WARN_ON(&i915->drm, dig_port->aux_wakeref);
+ drm_WARN_ON(display->drm, dig_port->aux_wakeref);
if (domain == POWER_DOMAIN_INVALID)
return;
- dig_port->aux_wakeref = intel_display_power_get(i915, domain);
+ dig_port->aux_wakeref = intel_display_power_get(display, domain);
}
static void
main_link_aux_power_domain_put(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum intel_display_power_domain domain =
intel_ddi_main_link_aux_domain(dig_port, crtc_state);
intel_wakeref_t wf;
@@ -967,13 +1006,13 @@ main_link_aux_power_domain_put(struct intel_digital_port *dig_port,
if (!wf)
return;
- intel_display_power_put(i915, domain, wf);
+ intel_display_power_put(display, domain, wf);
}
static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port;
/*
@@ -981,15 +1020,15 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -999,8 +1038,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1008,53 +1046,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1067,7 +1105,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1075,28 +1113,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
@@ -1129,14 +1167,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1144,24 +1182,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
- TAP2_DISABLE | TAP3_DISABLE);
+ COEFF_POLARITY | CURSOR_PROGRAM |
+ TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1173,7 +1212,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1184,7 +1223,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1193,7 +1232,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1203,12 +1242,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1218,33 +1257,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1253,13 +1292,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1269,13 +1308,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1286,7 +1325,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1295,7 +1334,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1311,21 +1350,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1335,9 +1374,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
@@ -1345,7 +1384,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1354,17 +1393,32 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
int level;
- intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
+ /* Wa_16011342517:adl-p */
+ if (display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if ((intel_encoder_is_hdmi(encoder) &&
+ crtc_state->port_clock == 594000) ||
+ (intel_encoder_is_dp(encoder) &&
+ crtc_state->port_clock == 162000)) {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 1);
+ } else {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 0);
+ }
+ }
+
+ intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL0(tc_port, ln),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
@@ -1374,7 +1428,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL1(tc_port, ln),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
@@ -1382,10 +1436,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
DKL_TX_DP20BITMODE, 0);
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_p) {
u32 val;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1401,7 +1455,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
}
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
val);
@@ -1413,10 +1467,15 @@ static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const u8 *signal_array;
+ size_t array_size;
int i;
- for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
- if (index_to_dp_signal_levels[i] == signal_levels)
+ signal_array = index_to_dp_signal_levels;
+ array_size = ARRAY_SIZE(index_to_dp_signal_levels);
+
+ for (i = 0; i < array_size; i++) {
+ if (signal_array[i] == signal_levels)
return i;
}
@@ -1447,12 +1506,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1461,7 +1520,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1471,13 +1530,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1486,70 +1545,70 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
-static struct intel_shared_dpll *
-_icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+static struct intel_dpll *
+_icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel_shift)
{
enum intel_dpll_id id;
- id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift;
+ id = (intel_de_read(display, reg) & clk_sel_mask) >> clk_sel_shift;
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1557,28 +1616,28 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_get_pll(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
}
@@ -1586,14 +1645,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1601,28 +1660,28 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_get_pll(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
}
@@ -1630,23 +1689,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* If we fail this, something went very wrong: first 2 PLLs should be
* used by first 2 phys and last 2 PLLs by last phys
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1654,30 +1713,30 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
enum intel_dpll_id id;
u32 val;
- val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy));
+ val = intel_de_read(display, DG1_DPCLKA_CFGCR0(phy));
val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
id = val;
@@ -1690,20 +1749,20 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
if (phy >= PHY_C)
id += DPLL_ID_DG1_DPLL2;
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1711,28 +1770,28 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_get_pll(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
}
@@ -1740,39 +1799,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1783,67 +1842,67 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
-static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
switch (tmp & DDI_CLK_SEL_MASK) {
case DDI_CLK_SEL_TBT_162:
@@ -1862,12 +1921,12 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
-static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum intel_dpll_id id;
switch (encoder->port) {
@@ -1885,63 +1944,63 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
-static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, DPLL_CTRL2);
+ tmp = intel_de_read(display, DPLL_CTRL2);
/*
* FIXME Not sure if the override affects both
@@ -1953,46 +2012,46 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
-static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, PORT_CLK_SEL(port));
+ tmp = intel_de_read(display, PORT_CLK_SEL(port));
switch (tmp & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_WRPLL1:
@@ -2020,7 +2079,7 @@ static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_dpll_by_id(display, id);
}
void intel_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2038,7 +2097,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2058,7 +2117,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2073,11 +2132,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2092,7 +2151,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2100,39 +2159,50 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
}
static void
+tgl_dkl_phy_check_and_rewrite(struct intel_display *display,
+ enum tc_port tc_port, u32 ln0, u32 ln1)
+{
+ if (ln0 != intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 0)))
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 0), ln0);
+ if (ln1 != intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 1)))
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 1), ln1);
+}
+
+static void
icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- u32 ln0, ln1, pin_assignment;
+ enum intel_tc_pin_assignment pin_assignment;
+ u32 ln0, ln1;
u8 width;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
- if (DISPLAY_VER(dev_priv) >= 12) {
- ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 0));
- ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 1));
+ if (DISPLAY_VER(display) >= 12) {
+ ln0 = intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 0));
+ ln1 = intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 1));
} else {
- ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
- ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
+ ln0 = intel_de_read(display, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(display, MG_DP_MODE(1, tc_port));
}
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
- case 0x0:
- drm_WARN_ON(&dev_priv->drm,
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ drm_WARN_ON(display->drm,
!intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2141,20 +2211,20 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x1:
+ case INTEL_TC_PIN_ASSIGNMENT_A:
if (width == 4) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x2:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
if (width == 2) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x3:
- case 0x5:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2163,8 +2233,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x4:
- case 0x6:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2177,12 +2247,16 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
MISSING_CASE(pin_assignment);
}
- if (DISPLAY_VER(dev_priv) >= 12) {
- intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0);
- intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1);
+ if (DISPLAY_VER(display) >= 12) {
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 0), ln0);
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 1), ln1);
+ /* WA_14018221282 */
+ if (IS_DISPLAY_VER(display, 12, 13))
+ tgl_dkl_phy_check_and_rewrite(display, tc_port, ln0, ln1);
+
} else {
- intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
- intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
+ intel_de_write(display, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(display, MG_DP_MODE(1, tc_port), ln1);
}
}
@@ -2198,10 +2272,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2210,10 +2284,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
@@ -2233,8 +2307,8 @@ void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
+ if (intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
drm_err(display->drm, "Timed out waiting for ACT sent\n");
}
@@ -2274,34 +2348,24 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
-static int read_fec_detected_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
struct intel_display *display = to_intel_display(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
- int status;
- int err;
+ u8 status = 0;
+ int ret, err;
- err = readx_poll_timeout(read_fec_detected_status, aux, status,
- status & mask || status < 0,
- 10000, 200000);
+ ret = poll_timeout_us(err = drm_dp_dpcd_read_byte(aux, DP_FEC_STATUS, &status),
+ err || (status & mask),
+ 10 * 1000, 200 * 1000, false);
- if (err || status < 0) {
+ /* Either can be non-zero, but not both */
+ ret = ret ?: err;
+ if (ret) {
drm_dbg_kms(display->drm,
- "Failed waiting for FEC %s to get detected: %d (status %d)\n",
- str_enabled_disabled(enabled), err, status);
- return err ? err : status;
+ "Failed waiting for FEC %s to get detected: %d (status 0x%02x)\n",
+ str_enabled_disabled(enabled), ret, status);
+ return ret;
}
return 0;
@@ -2319,11 +2383,11 @@ int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
return 0;
if (enabled)
- ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
else
- ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_clear_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
if (ret) {
drm_err(display->drm,
@@ -2388,26 +2452,26 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_encoder_is_combo(encoder)) {
enum phy phy = intel_encoder_to_phy(encoder);
- intel_combo_phy_power_up_lanes(i915, phy, false,
+ intel_combo_phy_power_up_lanes(display, phy, false,
crtc_state->lane_count,
dig_port->lane_reversal);
}
@@ -2417,11 +2481,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2430,28 +2494,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2467,12 +2531,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2484,49 +2548,38 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
-static u8 mtl_get_port_width(u8 lane_count)
-{
- switch (lane_count) {
- case 1:
- return 0;
- case 2:
- return 1;
- case 3:
- return 4;
- case 4:
- return 3;
- default:
- MISSING_CASE(lane_count);
- return 4;
- }
-}
-
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
+ int ret;
+
+ if (DISPLAY_VER(display) < 14)
+ return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+
+ ret = intel_de_wait_for_set_us(display, reg, wait_bits, 100);
+ if (ret) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
@@ -2534,16 +2587,13 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- u32 val;
+ u32 val = 0;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
- val &= ~XELPDP_PORT_WIDTH_MASK;
- val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
+ val |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
- val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK;
if (intel_dp_is_uhbr(crtc_state))
val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
else
@@ -2552,18 +2602,20 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
if (dig_port->lane_reversal)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK,
+ val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2574,6 +2626,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ bool transparent_mode;
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2624,6 +2678,9 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ transparent_mode = intel_dp_lttpr_transparent_mode_enabled(intel_dp);
+ drm_dp_lttpr_wake_timeout_setup(&intel_dp->aux, transparent_mode);
+
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
if (!is_mst)
intel_dp_sink_enable_decompression(state,
@@ -2668,6 +2725,14 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ /* 7.a 128b/132b SST. */
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2677,10 +2742,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2714,15 +2780,15 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 4. Enable the port PLL.
*
* The PLL enabling itself was already done before this function by
- * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * hsw_crtc_enable()->intel_enable_dpll(). We need only
* configure the PLL to port mapping here.
*/
intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2807,6 +2873,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2816,17 +2889,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2843,14 +2916,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2866,7 +2939,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2890,8 +2963,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
/* Panel replay has to be enabled in sink dpcd before link training. */
- if (crtc_state->has_panel_replay)
- intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
+ intel_psr_panel_replay_enable_sink(enc_to_intel_dp(encoder));
if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2912,15 +2984,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -2948,20 +3020,20 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
* - crtc_state will be the state of the first stream to be activated on this
* port, and it may not be the same stream that will be deactivated last, but
* each stream should have a state that is identical when it comes to the DP
- * link parameteres
+ * link parameters.
*/
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
@@ -2974,7 +3046,7 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
/* FIXME precompute everything properly */
/* FIXME how do we turn infoframes off again? */
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ if (intel_lspcon_active(dig_port) && intel_dp_has_hdmi_sink(&dig_port->dp))
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
@@ -2982,94 +3054,68 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
}
static void
-mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
+mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
+ int ret;
+
+ if (DISPLAY_VER(display) < 14)
+ return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+
+ ret = intel_de_wait_for_clear_us(display, reg, wait_bits, 100);
+ if (ret)
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
-static void mtl_disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_buf_enable(struct intel_encoder *encoder, u32 buf_ctl)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- u32 val;
- /* 3.b Clear DDI_CTL_DE Enable to 0. */
- val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
+ intel_de_write(display, DDI_BUF_CTL(port), buf_ctl | DDI_BUF_CTL_ENABLE);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
- /* 3.c Poll for PORT_BUF_CTL Idle Status == 1, timeout after 100us */
- mtl_wait_ddi_buf_idle(dev_priv, port);
- }
-
- /* 3.d Disable D2D Link */
- mtl_ddi_disable_d2d_link(encoder);
-
- /* 3.e Disable DP_TP_CTL */
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
- DP_TP_CTL_ENABLE, 0);
- }
+ intel_wait_ddi_buf_active(encoder);
}
-static void disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_buf_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- bool wait = false;
- u32 val;
-
- val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
- wait = true;
- }
- if (intel_crtc_has_dp_encoder(crtc_state))
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
- DP_TP_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- intel_ddi_disable_fec(encoder, crtc_state);
+ if (DISPLAY_VER(display) >= 14)
+ intel_wait_ddi_buf_idle(display, port);
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
-}
+ mtl_ddi_disable_d2d(encoder);
-static void intel_disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE, 0);
+ }
- if (DISPLAY_VER(dev_priv) >= 14) {
- mtl_disable_ddi_buf(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
- /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
- intel_ddi_disable_fec(encoder, crtc_state);
- } else {
- disable_ddi_buf(encoder, crtc_state);
- }
+ if (DISPLAY_VER(display) < 14)
+ intel_wait_ddi_buf_idle(display, port);
intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
@@ -3079,7 +3125,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3096,12 +3142,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (is_mst) {
+ if (DISPLAY_VER(display) >= 12) {
+ if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3110,7 +3156,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_clock(old_crtc_state);
}
- intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_ddi_buf_disable(encoder, old_crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
@@ -3121,7 +3167,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3130,15 +3176,15 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
if (wakeref)
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
dig_port->ddi_io_power_domain,
wakeref);
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3147,7 +3193,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3155,17 +3201,17 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
if (wakeref)
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
dig_port->ddi_io_power_domain,
wakeref);
@@ -3180,8 +3226,9 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
+ bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
int i;
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3193,6 +3240,22 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(old_crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, clear */
+ drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, 0);
+
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
+
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3201,7 +3264,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3232,7 +3295,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
* be deactivated on this port, and it may not be the same
* stream that was activated last, but each stream
* should have a state that is identical when it comes to
- * the DP link parameteres
+ * the DP link parameters
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
@@ -3303,18 +3366,20 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
- if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
+ intel_panel_prepare(crtc_state, conn_state);
+
+ if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
trans_port_sync_stop_link_train(state, encoder, crtc_state);
@@ -3345,30 +3410,28 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- u32 buf_ctl;
+ u32 buf_ctl = 0;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
- if (DISPLAY_VER(dev_priv) >= 14)
- mtl_ddi_enable_d2d(encoder);
+ mtl_ddi_enable_d2d(encoder);
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
@@ -3378,7 +3441,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3387,8 +3450,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3399,7 +3462,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -3415,37 +3478,32 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
* is filled with lane count, already set in the crtc_state.
* The same is required to be filled in PORT_BUF_CTL for C10/20 Phy.
*/
- buf_ctl = DDI_BUF_CTL_ENABLE;
-
if (dig_port->lane_reversal)
buf_ctl |= DDI_BUF_PORT_REVERSAL;
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
- u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
- port_buf |= XELPDP_PORT_WIDTH(lane_count);
+ port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
- buf_ctl |= DDI_PORT_WIDTH(lane_count);
+ buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
- intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
-
- intel_wait_ddi_buf_active(encoder);
+ intel_ddi_buf_enable(encoder, buf_ctl);
}
static void intel_ddi_enable(struct intel_atomic_state *state,
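/*
 * Sketch of the intel_ddi_buf_enable() helper used above and in the link
 * retrain paths below. Its definition is outside this excerpt; the shape
 * assumed here is inferred from the call sites, which pass the extra
 * DDI_BUF_CTL bits and only OR DDI_BUF_CTL_ENABLE into their cached value
 * after the call.
 */
static void intel_ddi_buf_enable(struct intel_encoder *encoder, u32 buf_ctl)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;

	/* Enable the DDI buffer with the caller-provided configuration bits. */
	intel_de_write(display, DDI_BUF_CTL(port), buf_ctl | DDI_BUF_CTL_ENABLE);
	intel_de_posting_read(display, DDI_BUF_CTL(port));

	/* Wait for the buffer to leave the idle state. */
	intel_wait_ddi_buf_active(encoder);
}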
@@ -3455,12 +3513,37 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *pipe_crtc;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_hdmi = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
int i;
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(display, TRANS_DP2_VFREQLOW(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- /* Enable/Disable DP2.0 SDP split config before transcoder */
- intel_audio_sdp_split_update(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
+
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_ddi_clear_act_sent(encoder, crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), 0,
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+
+ intel_ddi_wait_for_act_sent(encoder, crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
intel_enable_transcoder(crtc_state);
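/*
 * Worked example for the TRANS_DP2_VFREQ programming above (illustration
 * only, assuming KHz(x) expands to 1000 * (x)): for a 533.25 MHz pixel
 * clock, adjusted_mode->crtc_clock = 533250, so crtc_clock_hz =
 * 533250000 = 0x1fc8bfd0. VFREQHIGH then gets 0x1fc8bfd0 >> 24 = 0x1f
 * and VFREQLOW gets 0x1fc8bfd0 & 0xffffff = 0xc8bfd0.
 */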
@@ -3473,7 +3556,7 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_crtc_state);
}
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (is_hdmi)
intel_ddi_enable_hdmi(state, encoder, crtc_state, conn_state);
else
intel_ddi_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3491,9 +3574,11 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
+ intel_panel_unprepare(old_conn_state);
intel_psr_disable(intel_dp, old_crtc_state);
+ intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_disable_decompression(state,
@@ -3508,12 +3593,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3577,18 +3662,18 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
- intel_update_active_dpll(state, pipe_crtc, encoder);
+ intel_dpll_update_active(state, pipe_crtc, encoder);
}
/*
@@ -3602,7 +3687,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3621,49 +3706,50 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
int ln;
for (ln = 0; ln < 2; ln++)
- intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0);
+ intel_dkl_phy_rmw(display, DKL_PCS_DW5(tc_port, ln),
+ DKL_PCS_DW5_CORE_SOFTRESET, 0);
}
static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u32 dp_tp_ctl;
/*
* TODO: Disabling and re-enabling the port should not be necessary
* to train with only a different voltage swing entry
*/
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- if (dp_tp_ctl & DP_TP_CTL_ENABLE)
- mtl_disable_ddi_buf(encoder, crtc_state);
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
+
+ drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
/* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
/* 6.f Enable D2D Link */
mtl_ddi_enable_d2d(encoder);
@@ -3675,76 +3761,66 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
mtl_port_buf_ctl_program(encoder, crtc_state);
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
- intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_ddi_buf_enable(encoder, intel_dp->DP);
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
- intel_wait_ddi_buf_active(encoder);
+ /*
+ * 6.k If AUX-Less ALPM is going to be enabled:
+ * i. Configure PORT_ALPM_CTL and PORT_ALPM_LFPS_CTL here
+ */
+ intel_alpm_port_configure(intel_dp, crtc_state);
+
+ /*
+ * ii. Enable MAC Transmits LFPS in the "PHY Common Control 0" PIPE
+ * register
+ */
+ intel_lnl_mac_transmit_lfps(encoder, crtc_state);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- u32 dp_tp_ctl, ddi_buf_ctl;
- bool wait = false;
-
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
-
- if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
- intel_de_write(dev_priv, DDI_BUF_CTL(port),
- ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
- wait = true;
- }
+ u32 dp_tp_ctl;
- dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
- }
+ drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
+ intel_ddi_buf_enable(encoder, intel_dp->DP);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
-
- intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3765,17 +3841,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3785,26 +3861,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set_ms(display,
+ dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_IDLE_DONE, 2))
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
if (cpu_transcoder == TRANSCODER_EDP)
return false;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO))
+ if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3834,34 +3910,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3877,33 +3953,33 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ trans_wakeref = intel_display_power_get_if_enabled(display,
power_domain);
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
- intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
@@ -3970,6 +4046,11 @@ static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
crtc_state->lane_count =
((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ if (DISPLAY_VER(display) >= 12 &&
+ (ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2);
@@ -3982,7 +4063,7 @@ static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
intel_de_read(display,
dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ if (intel_lspcon_active(dig_port) && intel_dp_has_hdmi_sink(&dig_port->dp))
crtc_state->infoframes.enable |=
intel_lspcon_infoframes_enabled(encoder, crtc_state);
else
@@ -4021,11 +4102,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4064,9 +4144,19 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
intel_ddi_read_func_ctl_fdi(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
- } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST ||
- (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display))) {
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /*
+ * If this is true, we know we're being called from mst stream
+ * encoder's ->get_config().
+ */
+ if (intel_dp_mst_active_streams(intel_dp))
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
+ else
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
}
}
@@ -4078,11 +4168,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4090,14 +4180,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4118,7 +4208,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4132,26 +4222,39 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
bool pll_active;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
- drm_WARN_ON(&i915->drm, !pll_active);
+ pll_active = intel_dpll_get_hw_state(display, pll, &port_dpll->hw_state);
+ drm_WARN_ON(display->drm, !pll_active);
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
+static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
+
+ if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
+ crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
+ else
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -4202,7 +4305,7 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
-static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
@@ -4211,10 +4314,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4235,14 +4338,14 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id;
struct icl_port_dpll *port_dpll;
bool pll_active;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4253,15 +4356,15 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
- drm_WARN_ON(&i915->drm, !pll_active);
+ pll_active = intel_dpll_get_hw_state(display, pll, &port_dpll->hw_state);
+ drm_WARN_ON(display->drm, !pll_active);
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
- crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->intel_dpll))
+ crtc_state->port_clock = icl_calc_tbt_pll_link(display, encoder->port);
else
- crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4308,11 +4411,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4347,12 +4450,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4367,13 +4470,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4424,9 +4527,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4436,7 +4539,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4468,11 +4571,18 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
+ int ret = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = intel_dp_compute_config_late(encoder, crtc_state, conn_state);
+
+ if (ret)
+ return ret;
+
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4500,16 +4610,16 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct intel_display *display = to_intel_display(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_cleanup(dig_port);
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
drm_encoder_cleanup(encoder);
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp.port_data.streams);
kfree(dig_port);
}
@@ -4542,19 +4652,18 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.late_register = intel_ddi_encoder_late_register,
};
-static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4566,29 +4675,28 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
- return NULL;
+ return -EINVAL;
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
- return connector;
+ return 0;
}
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4601,7 +4709,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4618,7 +4726,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4634,7 +4742,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4654,16 +4762,16 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
* would be perfectly happy if we were to just reconfigure
* the SCDC settings on the fly.
*/
- return intel_modeset_commit_pipes(dev_priv, BIT(crtc->pipe), ctx);
+ return intel_modeset_commit_pipes(display, BIT(crtc->pipe), ctx);
}
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
@@ -4726,47 +4834,55 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
-static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(dig_port, connector);
- return connector;
+ if (!intel_hdmi_init_connector(dig_port, connector)) {
+ /*
+ * HDMI connector init failures may just mean conflicting DDC
+ * pins or not having enough lanes. Handle them gracefully, but
+ * don't fail the entire DDI init.
+ */
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ kfree(connector);
+ }
+
+ return 0;
}
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4777,7 +4893,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4786,15 +4902,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4807,7 +4923,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4816,8 +4932,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4827,8 +4942,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4836,8 +4950,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4845,11 +4958,10 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4857,8 +4969,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4866,31 +4977,30 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
@@ -4916,8 +5026,10 @@ static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder)
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
{
- intel_dp_encoder_shutdown(encoder);
- intel_hdmi_encoder_shutdown(encoder);
+ if (intel_encoder_is_dp(encoder))
+ intel_dp_encoder_shutdown(encoder);
+ if (intel_encoder_is_hdmi(encoder))
+ intel_hdmi_encoder_shutdown(encoder);
}
static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
@@ -4931,21 +5043,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -4959,18 +5071,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -4979,12 +5091,45 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
return false;
}
+static const char *intel_ddi_encoder_name(struct intel_display *display,
+ enum port port, enum phy phy,
+ struct seq_buf *s)
+{
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ seq_buf_printf(s, "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 12) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %s%c/PHY %s%c",
+ port >= PORT_TC1 ? "TC" : "",
+ port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 11) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %c%s/PHY %s%c",
+ port_name(port),
+ port >= PORT_C ? " (TC)" : "",
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else {
+ seq_buf_printf(s, "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ }
+
+ drm_WARN_ON(display->drm, seq_buf_has_overflowed(s));
+
+ return seq_buf_str(s);
+}
+
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
+ DECLARE_SEQ_BUF(encoder_name, 20);
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
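/*
 * Minimal usage sketch of the seq_buf API relied on above (illustration
 * only; the function and message below are made up): DECLARE_SEQ_BUF()
 * provides a stack-backed buffer, seq_buf_printf() appends formatted text,
 * and seq_buf_str() returns the NUL-terminated result while the buffer is
 * still in scope.
 */
static void example_log_ddi_name(struct intel_display *display, enum port port)
{
	DECLARE_SEQ_BUF(name, 20);

	seq_buf_printf(&name, "DDI %c", port_name(port));

	drm_dbg_kms(display->drm, "probing %s\n", seq_buf_str(&name));
}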
@@ -4994,31 +5139,31 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
return;
}
- phy = intel_port_to_phy(dev_priv, port);
+ phy = intel_port_to_phy(display, port);
/*
* On platforms with HTI (aka HDPORT), if it's enabled at boot it may
@@ -5027,7 +5172,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5044,70 +5189,37 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
- if (intel_phy_is_snps(dev_priv, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_phy_is_snps(display, phy) &&
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return;
- dig_port->aux_ch = AUX_CH_NONE;
-
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c",
- port_name(port - PORT_D_XELPD + PORT_D),
- phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %s%c/PHY %s%c",
- port >= PORT_TC1 ? "TC" : "",
- port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c%s/PHY %s%c",
- port_name(port),
- port >= PORT_C ? " (TC)" : "",
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
- }
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS, "%s",
+ intel_ddi_encoder_name(display, port, phy, &encoder_name));
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp_mutex);
- dig_port->num_hdcp_streams = 0;
-
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
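/*
 * Assumed shape of intel_dig_port_alloc() (defined outside this excerpt):
 * it replaces the open-coded kzalloc() plus the default initialization
 * removed above (aux_ch, DP/HDMI register defaults, HDCP bookkeeping).
 * Only fields visible elsewhere in this diff are shown.
 */
static struct intel_digital_port *intel_dig_port_alloc(void)
{
	struct intel_digital_port *dig_port;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return NULL;

	dig_port->aux_ch = AUX_CH_NONE;
	dig_port->dp.output_reg = INVALID_MMIO_REG;
	dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
	/* ... plus the HDCP mutex/stream-count defaults removed above ... */

	return dig_port;
}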
@@ -5129,37 +5241,42 @@ void intel_ddi_init(struct intel_display *display,
encoder->get_power_domains = intel_ddi_get_power_domains;
encoder->type = INTEL_OUTPUT_DDI;
- encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
encoder->port = port;
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->enable_clock = intel_xe3plpd_pll_enable;
+ encoder->disable_clock = intel_xe3plpd_pll_disable;
+ encoder->port_pll_type = intel_mtl_port_pll_type;
+ encoder->get_config = xe3plpd_ddi_get_config;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5171,8 +5288,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5184,36 +5301,38 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->set_signal_levels = intel_lt_phy_set_signal_levels;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5221,31 +5340,30 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
- encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
if (need_aux_ch(encoder, init_dp)) {
@@ -5262,7 +5380,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5279,24 +5397,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
+ drm_WARN_ON(display->drm, port > PORT_I);
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5306,13 +5424,13 @@ void intel_ddi_init(struct intel_display *display,
intel_infoframe_init(dig_port);
if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
+ if (intel_ddi_init_dp_connector(dig_port))
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
@@ -5320,7 +5438,7 @@ void intel_ddi_init(struct intel_display *display,
* but leave it just in case we have some really bad VBTs...
*/
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
+ if (intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
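The pattern running through the intel_ddi.c hunks above swaps the device-wide IS_*(dev_priv) platform macros for the per-display platform flags, and passes struct intel_display instead of struct drm_i915_private to the display helpers. A minimal before/after sketch of that conversion (names taken from the diff; not a complete function):

	/* before: platform check through the whole i915 device */
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (IS_DG1(dev_priv))
		encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);

	/* after: platform flag and helper keyed off the display struct */
	struct intel_display *display = to_intel_display(encoder);

	if (display->platform.dg1)
		encoder->hpd_pin = dg1_hpd_pin(display, port);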
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 2faadd1441e2..f6f511bb0431 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -9,7 +9,6 @@
#include "i915_reg_defs.h"
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bios_encoder_data;
struct intel_connector;
@@ -17,9 +16,9 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
+struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
-struct intel_shared_dpll;
enum pipe;
enum port;
enum transcoder;
@@ -41,7 +40,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
@@ -51,11 +50,10 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port);
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 9389b295036e..395dba8c9e4d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -3,13 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
-#include "intel_cx0_phy.h"
+#include "intel_lt_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1115,6 +1116,69 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
.num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
};
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_dp14[] = {
+ { .lt = { 1, 0, 0, 21, 0 } },
+ { .lt = { 1, 1, 0, 24, 3 } },
+ { .lt = { 1, 2, 0, 28, 7 } },
+ { .lt = { 0, 3, 0, 35, 13 } },
+ { .lt = { 1, 1, 0, 27, 0 } },
+ { .lt = { 1, 2, 0, 31, 4 } },
+ { .lt = { 0, 3, 0, 39, 9 } },
+ { .lt = { 1, 2, 0, 35, 0 } },
+ { .lt = { 0, 3, 0, 41, 7 } },
+ { .lt = { 0, 3, 0, 48, 0 } },
+};
+
+/* DP2.1 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_uhbr[] = {
+ { .lt = { 0, 0, 0, 48, 0 } },
+ { .lt = { 0, 0, 0, 43, 5 } },
+ { .lt = { 0, 0, 0, 40, 8 } },
+ { .lt = { 0, 0, 0, 37, 11 } },
+ { .lt = { 0, 0, 0, 33, 15 } },
+ { .lt = { 0, 0, 2, 46, 0 } },
+ { .lt = { 0, 0, 2, 42, 4 } },
+ { .lt = { 0, 0, 2, 38, 8 } },
+ { .lt = { 0, 0, 2, 35, 11 } },
+ { .lt = { 0, 0, 2, 33, 13 } },
+ { .lt = { 0, 0, 4, 44, 0 } },
+ { .lt = { 0, 0, 4, 40, 4 } },
+ { .lt = { 0, 0, 4, 37, 7 } },
+ { .lt = { 0, 0, 4, 33, 11 } },
+ { .lt = { 0, 0, 8, 40, 0 } },
+ { .lt = { 1, 0, 2, 26, 2 } },
+};
+
+/* eDP */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_edp[] = {
+ { .lt = { 1, 0, 0, 12, 0 } },
+ { .lt = { 1, 1, 0, 13, 1 } },
+ { .lt = { 1, 2, 0, 15, 3 } },
+ { .lt = { 1, 3, 0, 19, 7 } },
+ { .lt = { 1, 1, 0, 14, 0 } },
+ { .lt = { 1, 2, 0, 16, 2 } },
+ { .lt = { 1, 3, 0, 21, 5 } },
+ { .lt = { 1, 2, 0, 18, 0 } },
+ { .lt = { 1, 3, 0, 22, 4 } },
+ { .lt = { 1, 3, 0, 26, 0 } },
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_dp14 = {
+ .entries = _xe3plpd_lt_trans_dp14,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_dp14),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_uhbr = {
+ .entries = _xe3plpd_lt_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_uhbr),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_edp = {
+ .entries = _xe3plpd_lt_trans_edp,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_edp),
+};
+
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1407,10 +1471,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (crtc_state->port_clock > 270000) {
- if (IS_TIGERLAKE_UY(dev_priv)) {
+ if (display->platform.tigerlake_uy) {
return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
@@ -1707,61 +1771,84 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
+static const struct intel_ddi_buf_trans *
+xe3plpd_get_lt_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_uhbr, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&xe3plpd_lt_trans_dp14, n_entries);
+}
+
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(i915) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->get_buf_trans = xe3plpd_get_lt_buf_trans;
+ } else if (DISPLAY_VER(display) >= 14) {
if (intel_encoder_is_c10phy(encoder))
encoder->get_buf_trans = mtl_get_c10_buf_trans;
else
encoder->get_buf_trans = mtl_get_c20_buf_trans;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
encoder->get_buf_trans = adls_get_combo_buf_trans;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
encoder->get_buf_trans = rkl_get_combo_buf_trans;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
encoder->get_buf_trans = dg1_get_combo_buf_trans;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = tgl_get_combo_buf_trans;
else
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
- } else if (DISPLAY_VER(i915) == 11) {
- if (IS_JASPERLAKE(i915))
+ } else if (DISPLAY_VER(display) == 11) {
+ if (display->platform.jasperlake)
encoder->get_buf_trans = jsl_get_combo_buf_trans;
- else if (IS_ELKHARTLAKE(i915))
+ else if (display->platform.elkhartlake)
encoder->get_buf_trans = ehl_get_combo_buf_trans;
else if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = icl_get_combo_buf_trans;
else
encoder->get_buf_trans = icl_get_mg_buf_trans;
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->get_buf_trans = bxt_get_buf_trans;
- } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) {
+ } else if (display->platform.cometlake_ulx ||
+ display->platform.coffeelake_ulx ||
+ display->platform.kabylake_ulx) {
encoder->get_buf_trans = kbl_y_get_buf_trans;
- } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) {
+ } else if (display->platform.cometlake_ult ||
+ display->platform.coffeelake_ult ||
+ display->platform.kabylake_ult) {
encoder->get_buf_trans = kbl_u_get_buf_trans;
- } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
+ } else if (display->platform.cometlake ||
+ display->platform.coffeelake ||
+ display->platform.kabylake) {
encoder->get_buf_trans = kbl_get_buf_trans;
- } else if (IS_SKYLAKE_ULX(i915)) {
+ } else if (display->platform.skylake_ulx) {
encoder->get_buf_trans = skl_y_get_buf_trans;
- } else if (IS_SKYLAKE_ULT(i915)) {
+ } else if (display->platform.skylake_ult) {
encoder->get_buf_trans = skl_u_get_buf_trans;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
encoder->get_buf_trans = skl_get_buf_trans;
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
encoder->get_buf_trans = bdw_get_buf_trans;
- } else if (IS_HASWELL(i915)) {
+ } else if (display->platform.haswell) {
encoder->get_buf_trans = hsw_get_buf_trans;
} else {
- MISSING_CASE(INTEL_INFO(i915)->platform);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ MISSING_CASE(pdev->device);
}
}
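The new xe3plpd tables use positional initializers for the .lt member of union intel_ddi_buf_trans_entry, i.e. struct xe3plpd_lt_phy_buf_trans added in the header diff below. As a readability aid, here is the first DP 1.4 row above written with designated initializers (a sketch, assuming the field order shown in that struct):

	static const union intel_ddi_buf_trans_entry xe3plpd_dp14_level0 = {
		.lt = {
			.txswing	= 1,
			.txswing_level	= 0,
			.pre_cursor	= 0,
			.main_cursor	= 21,
			.post_cursor	= 0,
		},
	};

xe3plpd_get_lt_buf_trans() then selects between the UHBR, eDP and DP 1.4 tables, in that order, from the crtc state, and intel_ddi_buf_trans_init() routes all HAS_LT_PHY() platforms to it ahead of the DISPLAY_VER() >= 14 C10/C20 paths.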
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2133984a572b..cec332090a20 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;
@@ -51,6 +50,14 @@ struct dg2_snps_phy_buf_trans {
u8 post_cursor;
};
+struct xe3plpd_lt_phy_buf_trans {
+ u8 txswing;
+ u8 txswing_level;
+ u8 pre_cursor;
+ u8 main_cursor;
+ u8 post_cursor;
+};
+
union intel_ddi_buf_trans_entry {
struct hsw_ddi_buf_trans hsw;
struct bxt_ddi_buf_trans bxt;
@@ -58,6 +65,7 @@ union intel_ddi_buf_trans_entry {
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
struct dg2_snps_phy_buf_trans snps;
+ struct xe3plpd_lt_phy_buf_trans lt;
};
struct intel_ddi_buf_trans {
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 4561de5d5e10..a7ce3b875e06 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,18 +6,19 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "i915_drv.h"
-#include "i915_trace.h"
+#include "intel_display_core.h"
+#include "intel_dmc_wl.h"
#include "intel_dsb.h"
#include "intel_uncore.h"
+#include "intel_uncore_trace.h"
static inline struct intel_uncore *__to_uncore(struct intel_display *display)
{
- return &to_i915(display->drm)->uncore;
+ return to_intel_uncore(display->drm);
}
static inline u32
-__intel_de_read(struct intel_display *display, i915_reg_t reg)
+intel_de_read(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -29,7 +30,6 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
@@ -64,7 +64,7 @@ intel_de_read64_2x32(struct intel_display *display,
}
static inline void
-__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
+intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, reg);
@@ -72,10 +72,9 @@ __intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_dmc_wl_get(display, reg);
@@ -83,60 +82,32 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
-__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
- u32 clear, u32 set)
-{
- return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
-}
-
-static inline u32
-__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
- u32 set)
+intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
intel_dmc_wl_get(display, reg);
- val = __intel_de_rmw_nowl(display, reg, clear, set);
+ val = intel_uncore_rmw(__to_uncore(display), reg, clear, set);
intel_dmc_wl_put(display, reg);
return val;
}
-#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
-
-static inline int
-__intel_de_wait_for_register_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
-{
- return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout);
-}
-
-static inline int
-__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us)
-{
- return __intel_wait_for_register(__to_uncore(display), reg, mask,
- value, fast_timeout_us, 0, NULL);
-}
static inline int
-intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
intel_dmc_wl_put(display, reg);
@@ -144,15 +115,16 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -160,39 +132,50 @@ intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
- int ret;
-
- intel_dmc_wl_get(display, reg);
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
+}
- ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
- value,
- fast_timeout_us, slow_timeout_ms, out_value);
+static inline int
+intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
+{
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
+}
- intel_dmc_wl_put(display, reg);
+static inline int
+intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL);
+}
- return ret;
+static inline int
+intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL);
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout);
+ return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout);
+ return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -203,7 +186,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -212,15 +195,25 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
+
+static inline u32
+intel_de_rmw_fw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read_fw(display, reg);
+ val = (old & ~clear) | set;
+ intel_de_write_fw(display, reg, val);
+
+ return old;
+}
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
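With the wrapper macros gone, the intel_de helpers take struct intel_display directly and spell out the timeout unit in their names. A caller-side sketch mirroring the intel_wait_for_pipe_off() conversion in the intel_display.c diff below (a sketch only; the register and mask are reused from that hunk for illustration):

	static void example_wait_pipe_off(struct intel_display *display,
					  enum transcoder cpu_transcoder)
	{
		/* wait up to 100 ms for the transcoder enable bit to clear */
		if (intel_de_wait_for_clear_ms(display,
					       TRANSCONF(display, cpu_transcoder),
					       TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
	}

The new intel_de_rmw_fw() likewise performs its read-modify-write through the _fw accessors, so it skips the DMC wakelock handling that intel_de_rmw() wraps around the uncore access.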
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 21319f753a34..095a319f8bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,6 +41,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -51,29 +52,33 @@
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
+#include "intel_casf.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
-#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -91,10 +96,12 @@
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
+#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
@@ -103,8 +110,9 @@
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
+#include "intel_pfit.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
@@ -123,13 +131,10 @@
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
-#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
-#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
-#include "vlv_sideband.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -137,63 +142,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
-/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
-{
- int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
- /* Obtain SKU information */
- hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
- CCK_FUSE_HPLL_FREQ_MASK;
-
- return vco_freq[hpll_freq] * 1000;
-}
-
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
- const char *name, u32 reg, int ref_freq)
-{
- u32 val;
- int divider;
-
- val = vlv_cck_read(dev_priv, reg);
- divider = val & CCK_FREQUENCY_VALUES;
-
- drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
-
- return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
-}
-
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg)
-{
- int hpll;
-
- vlv_cck_get(dev_priv);
-
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
-
- hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
-
- vlv_cck_put(dev_priv);
-
- return hpll;
-}
-
-void intel_update_czclk(struct drm_i915_private *dev_priv)
-{
- if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
- return;
-
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
- CCK_CZ_CLOCK_CONTROL);
-
- drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
- dev_priv->czclk_freq);
-}
-
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
return (crtc_state->active_planes &
@@ -202,29 +150,29 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
+skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS,
enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
static void
-icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DPFR_GATING_DIS,
enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
static void
-icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -404,41 +352,40 @@ struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
- TRANSCONF_STATE_ENABLE, 100))
- drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
+ drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
}
-void assert_transcoder(struct drm_i915_private *dev_priv,
+void assert_transcoder(struct intel_display *display,
enum transcoder cpu_transcoder, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ u32 val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
cur_state = !!(val & TRANSCONF_ENABLE);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
} else {
cur_state = false;
}
@@ -468,57 +415,22 @@ static void assert_plane(struct intel_plane *plane, bool state)
static void assert_planes_disabled(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
assert_plane_disabled(plane);
}
-void vlv_wait_port_ready(struct intel_display *display,
- struct intel_digital_port *dig_port,
- unsigned int expected_mask)
-{
- u32 port_mask;
- i915_reg_t dpll_reg;
-
- switch (dig_port->base.port) {
- default:
- MISSING_CASE(dig_port->base.port);
- fallthrough;
- case PORT_B:
- port_mask = DPLL_PORTB_READY_MASK;
- dpll_reg = DPLL(display, 0);
- break;
- case PORT_C:
- port_mask = DPLL_PORTC_READY_MASK;
- dpll_reg = DPLL(display, 0);
- expected_mask <<= 4;
- break;
- case PORT_D:
- port_mask = DPLL_PORTD_READY_MASK;
- dpll_reg = DPIO_PHY_STATUS;
- break;
- }
-
- if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
- drm_WARN(display->drm, 1,
- "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- intel_de_read(display, dpll_reg) & port_mask,
- expected_mask);
-}
-
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
@@ -527,56 +439,56 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
+ assert_dsi_pll_enabled(display);
else
- assert_pll_enabled(dev_priv, pipe);
+ assert_pll_enabled(display, pipe);
} else {
if (new_crtc_state->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv,
+ assert_fdi_rx_pll_enabled(display,
intel_crtc_pch_transcoder(crtc));
- assert_fdi_tx_pll_enabled(dev_priv,
+ assert_fdi_tx_pll_enabled(display,
(enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
- intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
+ if (DISPLAY_VER(display) == 13)
+ intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
u32 set = 0;
- if (DISPLAY_VER(dev_priv) == 14)
+ if (DISPLAY_VER(display) == 14)
set |= DP_FEC_BS_JITTER_WA;
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
clear, set);
}
- val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.i830);
return;
}
/* Wa_1409098942:adlp+ */
- if (DISPLAY_VER(dev_priv) >= 13 &&
+ if (DISPLAY_VER(display) >= 13 &&
new_crtc_state->dsc.compression_enable) {
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
TRANSCONF_PIXEL_COUNT_SCALING_X4);
}
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder),
val | TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -593,12 +505,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -606,7 +517,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -618,17 +529,17 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
- if (!IS_I830(dev_priv))
+ if (!display->platform.i830)
val &= ~TRANSCONF_ENABLE;
/* Wa_1409098942:adlp+ */
- if (DISPLAY_VER(dev_priv) >= 13 &&
+ if (DISPLAY_VER(display) >= 13 &&
old_crtc_state->dsc.compression_enable)
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -636,107 +547,41 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
-{
- unsigned int size = 0;
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
-
- return size;
-}
-
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
-{
- unsigned int size = 0;
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int plane_size;
-
- if (rem_info->plane[i].linear)
- plane_size = rem_info->plane[i].size;
- else
- plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
-
- if (plane_size == 0)
- continue;
-
- if (rem_info->plane_alignment)
- size = ALIGN(size, rem_info->plane_alignment);
-
- size += plane_size;
- }
-
- return size;
-}
-
-bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- return DISPLAY_VER(dev_priv) < 4 ||
- (plane->fbc && !plane_state->no_fbc_reason &&
- plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
-}
-
-/*
- * Convert the x/y offsets into a linear offset.
- * Only valid with 0/180 degree rotation, which is fine since linear
- * offset is only used with linear buffers on pre-hsw and tiled buffers
- * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
- */
-u32 intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int color_plane)
-{
- const struct drm_framebuffer *fb = state->hw.fb;
- unsigned int cpp = fb->format->cpp[color_plane];
- unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
-
- return y * pitch + x * cpp;
-}
-
-/*
- * Add the x/y offsets derived from fb->offsets[] to the user
- * specified plane src x/y offsets. The resulting x/y offsets
- * specify the start of scanout from the beginning of the gtt mapping.
- */
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
-
-{
- *x += state->view.color_plane[color_plane].x;
- *y += state->view.color_plane[color_plane].y;
-}
-
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 modifier)
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier)
{
struct intel_crtc *crtc;
struct intel_plane *plane;
- if (!HAS_DISPLAY(dev_priv))
- return 0;
-
/*
* We assume the primary plane for pipe A has
* the highest stride limits of them all,
* if in case pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_first_crtc(dev_priv);
+ crtc = intel_first_crtc(display);
if (!crtc)
return 0;
plane = to_intel_plane(crtc->base.primary);
- return plane->max_stride(plane, pixel_format, modifier,
+ return plane->max_stride(plane, info, modifier,
DRM_MODE_ROTATE_0);
}
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!HAS_DISPLAY(display))
+ return 0;
+
+ return intel_plane_fb_max_stride(display,
+ drm_get_format_info(drm, pixel_format, modifier),
+ modifier);
+}
+
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
@@ -753,7 +598,7 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_plane *plane;
/*
@@ -764,7 +609,7 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
- drm_for_each_plane_mask(plane, &dev_priv->drm,
+ drm_for_each_plane_mask(plane, display->drm,
crtc_state->uapi.plane_mask) {
crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
@@ -774,29 +619,27 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
plane->base.base.id, plane->base.name,
crtc->base.base.id, crtc->base.name);
+ intel_plane_set_invisible(crtc_state, plane_state);
intel_set_plane_visible(crtc_state, plane_state, false);
intel_plane_fixup_bitmasks(crtc_state);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->data_rate_y[plane->id] = 0;
- crtc_state->rel_data_rate[plane->id] = 0;
- crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+
+ skl_wm_plane_disable_noatomic(crtc, plane);
if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
hsw_ips_disable(crtc_state)) {
crtc_state->ips_enabled = false;
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
}
/*
@@ -808,19 +651,19 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) &&
- intel_set_memory_cxsr(dev_priv, false))
- intel_crtc_wait_for_next_vblank(crtc);
+ if (HAS_GMCH(display) &&
+ intel_set_memory_cxsr(display, false))
+ intel_plane_initial_vblank_wait(crtc);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);
intel_plane_disable_arm(NULL, plane, crtc_state);
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
}
unsigned int
@@ -836,12 +679,12 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(display, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -861,24 +704,24 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
* Underrun recovery must always be disabled on display 13+.
* DG2 chicken bit meaning is inverted compared to other platforms.
*/
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
+ else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
- intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+bool intel_has_pending_fb_unpin(struct intel_display *display)
{
struct drm_crtc *crtc;
bool cleanup_done;
- drm_for_each_crtc(crtc, &dev_priv->drm) {
+ drm_for_each_crtc(crtc, display->drm) {
struct drm_crtc_commit *commit;
spin_lock(&crtc->commit_lock);
commit = list_first_entry_or_null(&crtc->commit_list,
@@ -930,36 +773,6 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
return encoder;
}
-static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
- enum pipe pipe = crtc->pipe;
- int width = drm_rect_width(dst);
- int height = drm_rect_height(dst);
- int x = dst->x1;
- int y = dst->y1;
-
- if (!crtc_state->pch_pfit.enabled)
- return;
-
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
- else
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
- PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
- PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
@@ -972,13 +785,13 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!crtc_state->nv12_planes)
return false;
/* WA Display #0827: Gen9:all */
- if (DISPLAY_VER(dev_priv) == 9)
+ if (DISPLAY_VER(display) == 9)
return true;
return false;
@@ -986,10 +799,10 @@ static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_2006604312:icl,ehl */
- if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
+ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(display) == 11)
return true;
return false;
@@ -997,31 +810,31 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_1604331009:icl,jsl,ehl */
if (is_hdr_mode(crtc_state) &&
crtc_state->active_planes & BIT(PLANE_CURSOR) &&
- DISPLAY_VER(dev_priv) == 11)
+ DISPLAY_VER(display) == 11)
return true;
return false;
}
-static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+static void intel_async_flip_vtd_wa(struct intel_display *display,
enum pipe pipe, bool enable)
{
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/*
- * "Plane N strech max must be programmed to 11b (x1)
+ * "Plane N stretch max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
SKL_PLANE1_STRETCH_MAX_MASK,
enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
} else {
/* Also needed on HSW/BDW albeit undocumented */
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
HSW_PRI_STRETCH_MAX_MASK,
enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
}
@@ -1029,10 +842,11 @@ static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
- (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+ return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
+ (DISPLAY_VER(display) == 9 || display->platform.broadwell ||
+ display->platform.haswell);
}
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
@@ -1109,7 +923,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
- old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
+ old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
+ old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
+ old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
@@ -1175,49 +991,74 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}
+static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
+static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
#undef is_disabling
#undef is_enabling
static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
- intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+ intel_frontbuffer_flip(display, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
if (needs_async_flip_vtd_wa(old_crtc_state) &&
!needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, false);
+ intel_async_flip_vtd_wa(display, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, false);
+ skl_wa_827(display, pipe, false);
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, false);
+ icl_wa_scalerclkgating(display, pipe, false);
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, false);
+ icl_wa_cursorclkgating(display, pipe, false);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ if (intel_display_wa(display, 14011503117)) {
+ if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
+ adl_scaler_ecc_unmask(new_crtc_state);
+ }
+
+ intel_alpm_post_plane_update(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1305,13 +1146,16 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_alpm_pre_plane_update(state, crtc);
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1320,9 +1164,10 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (audio_disabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_disable(state, crtc);
- intel_drrs_deactivate(old_crtc_state);
+ if (intel_casf_disabling(old_crtc_state, new_crtc_state))
+ intel_casf_disable(new_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
+ intel_drrs_deactivate(old_crtc_state);
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1332,22 +1177,22 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, true);
+ intel_async_flip_vtd_wa(display, pipe, true);
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, true);
+ skl_wa_827(display, pipe, true);
/* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, true);
+ icl_wa_scalerclkgating(display, pipe, true);
/* Wa_1604331009:icl,jsl,ehl */
if (!needs_cursorclk_wa(old_crtc_state) &&
needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, true);
+ icl_wa_cursorclkgating(display, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -1358,8 +1203,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ if (HAS_GMCH(display) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1369,8 +1214,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1394,7 +1239,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1405,8 +1250,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* chance of catching underruns with the intermediate watermarks
* vs. the old plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
/*
* WA for platforms where async address update enable bit
@@ -1419,7 +1264,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int update_mask = new_crtc_state->update_planes;
@@ -1441,12 +1286,12 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
fb_bits |= plane->frontbuffer_bit;
}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+ intel_frontbuffer_flip(display, fb_bits);
}
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1455,12 +1300,12 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->display.dpll.mgr) {
+ if (display->dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
- new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
}
}
@@ -1645,12 +1490,12 @@ static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
/*
@@ -1663,8 +1508,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, false);
ilk_configure_cpu_transcoder(new_crtc_state);
@@ -1677,8 +1522,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
if (new_crtc_state->has_pch_encoder) {
ilk_pch_pre_enable(state, crtc);
} else {
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
+ assert_fdi_tx_disabled(display, pipe);
+ assert_fdi_rx_disabled(display, pipe);
}
ilk_pfit_enable(new_crtc_state);
@@ -1699,7 +1544,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/*
@@ -1712,33 +1557,33 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
}
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}
/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+ return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled;
}
static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
- intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe),
mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ intel_de_write(display, WM_LINETIME(crtc->pipe),
HSW_LINETIME(crtc_state->linetime) |
HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
@@ -1754,8 +1599,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (crtc_state->has_pch_encoder) {
@@ -1769,11 +1614,10 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(dev_priv))
- intel_vrr_set_transcoder_timings(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1787,26 +1631,24 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_crtc *pipe_crtc;
int i;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
- intel_dmc_enable_pipe(display, pipe_crtc->pipe);
-
- intel_encoders_pre_pll_enable(state, crtc);
-
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
+ const struct intel_crtc_state *new_pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
+ intel_dmc_enable_pipe(new_pipe_crtc_state);
}
+ intel_encoders_pre_pll_enable(state, crtc);
+
+ if (new_crtc_state->intel_dpll)
+ intel_dpll_enable(new_crtc_state);
+
intel_encoders_pre_enable(state, crtc);
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
@@ -1815,12 +1657,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_dsc_enable(pipe_crtc_state);
- if (HAS_UNCOMPRESSED_JOINER(dev_priv))
+ if (HAS_UNCOMPRESSED_JOINER(display))
intel_uncompressed_joiner_enable(pipe_crtc_state);
intel_set_pipe_src_size(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, pipe_crtc_state);
}
@@ -1836,7 +1678,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_pfit_enable(pipe_crtc_state);
else
ilk_pfit_enable(pipe_crtc_state);
@@ -1849,7 +1691,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
hsw_set_linetime_wm(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_set_pipe_chicken(pipe_crtc_state);
intel_initial_watermarks(state, pipe_crtc);
@@ -1872,7 +1714,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* enabling, we need to change the workaround.
*/
hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) {
struct intel_crtc *wa_crtc =
intel_crtc_for_pipe(display, hsw_workaround_pipe);
@@ -1882,28 +1724,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
}
}
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* To avoid upsetting the power well on haswell only disable the pfit if
- * it's in use. The hw state code will make sure we get this right. */
- if (!old_crtc_state->pch_pfit.enabled)
- return;
-
- intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
-}
-
static void ilk_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
@@ -1911,8 +1737,8 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, false);
intel_encoders_disable(state, crtc);
@@ -1930,10 +1756,8 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
if (old_crtc_state->has_pch_encoder)
ilk_pch_post_disable(state, crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1952,57 +1776,30 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
+ intel_dpll_disable(old_crtc_state);
+
+ intel_encoders_post_pll_disable(state, crtc);
+
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
- intel_disable_shared_dpll(old_pipe_crtc_state);
+ intel_dmc_disable_pipe(old_pipe_crtc_state);
}
-
- intel_encoders_post_pll_disable(state, crtc);
-
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
- intel_dmc_disable_pipe(display, pipe_crtc->pipe);
-}
-
-static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!crtc_state->gmch_pfit.control)
- return;
-
- /*
- * The panel fitter should only be adjusted whilst the pipe is disabled,
- * according to register description and PRM.
- */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE);
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv),
- crtc_state->gmch_pfit.pgm_ratios);
- intel_de_write(dev_priv, PFIT_CONTROL(dev_priv),
- crtc_state->gmch_pfit.control);
-
- /* Border color in case we don't scale up to the full screen. Black by
- * default, change to something else for debugging. */
- intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
}
/* Prefer intel_encoder_is_combo() */
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
if (phy == PHY_NONE)
return false;
- else if (IS_ALDERLAKE_S(dev_priv))
+ else if (display->platform.alderlake_s)
return phy <= PHY_E;
- else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.dg1 || display->platform.rocketlake)
return phy <= PHY_D;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
return phy <= PHY_C;
- else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
+ else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12))
return phy <= PHY_B;
else
/*
@@ -2014,47 +1811,47 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
}
/* Prefer intel_encoder_is_tc() */
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
/*
* Discrete GPU phy's are not attached to FIA's to support TC
* subsystem Legacy or non-legacy, and only support native DP/HDMI
*/
- if (IS_DGFX(dev_priv))
+ if (display->platform.dgfx)
return false;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return phy >= PHY_F && phy <= PHY_I;
- else if (IS_TIGERLAKE(dev_priv))
+ else if (display->platform.tigerlake)
return phy >= PHY_D && phy <= PHY_I;
- else if (IS_ICELAKE(dev_priv))
+ else if (display->platform.icelake)
return phy >= PHY_C && phy <= PHY_F;
return false;
}
/* Prefer intel_encoder_is_snps() */
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
/*
* For DG2, and for DG2 only, all four "combo" ports and the TC1 port
* (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
*/
- return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
+ return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}
/* Prefer intel_encoder_to_phy() */
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD)
return PHY_D + port - PORT_D_XELPD;
- else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
+ else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1)
return PHY_F + port - PORT_TC1;
- else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ else if (display->platform.alderlake_s && port >= PORT_TC1)
return PHY_B + port - PORT_TC1;
- else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
port == PORT_D)
return PHY_A;
@@ -2062,12 +1859,12 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
}
/* Prefer intel_encoder_to_tc() */
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
- if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
+ if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
return TC_PORT_NONE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return TC_PORT_1 + port - PORT_TC1;
else
return TC_PORT_1 + port - PORT_C;
@@ -2075,55 +1872,55 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_phy(i915, encoder->port);
+ return intel_port_to_phy(display, encoder->port);
}
bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_combo(display, intel_encoder_to_phy(encoder));
}
bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_snps(display, intel_encoder_to_phy(encoder));
}
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_tc(i915, encoder->port);
+ return intel_port_to_tc(display, encoder->port);
}
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
- return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_tbt_aux_domain(display, dig_port->aux_ch);
- return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
@@ -2139,17 +1936,17 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
crtc_state->pch_pfit.force_thru)
set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
- drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ drm_for_each_encoder_mask(encoder, display->drm,
crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
set_bit(intel_encoder->power_domain, mask->bits);
}
- if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+ if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
- if (crtc_state->shared_dpll)
+ if (crtc_state->intel_dpll)
set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
@@ -2159,8 +1956,8 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *old_domains)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
struct intel_power_domain_mask domains, new_domains;
@@ -2176,7 +1973,7 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
POWER_DOMAIN_NUM);
for_each_power_domain(domain, &new_domains)
- intel_display_power_get_in_set(dev_priv,
+ intel_display_power_get_in_set(display,
&crtc->enabled_power_domains,
domain);
}
@@ -2184,7 +1981,9 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains)
{
- intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_display_power_put_mask_in_set(display,
&crtc->enabled_power_domains,
domains);
}
@@ -2209,33 +2008,33 @@ static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
- intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+ intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
+ if (display->platform.cherryview && pipe == PIPE_B) {
+ intel_de_write(display, CHV_BLEND(display, pipe),
CHV_BLEND_LEGACY);
- intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
+ intel_de_write(display, CHV_CANVAS(display, pipe), 0);
}
crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_enable_pll(new_crtc_state);
else
vlv_enable_pll(new_crtc_state);
@@ -2257,12 +2056,12 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
@@ -2271,8 +2070,8 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
- if (DISPLAY_VER(dev_priv) != 2)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ if (DISPLAY_VER(display) != 2)
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_encoders_pre_enable(state, crtc);
@@ -2283,7 +2082,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2291,30 +2090,14 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
}
-static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!old_crtc_state->gmch_pfit.control)
- return;
-
- assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
-
- drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
- intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)));
- intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0);
-}
-
static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2323,7 +2106,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
@@ -2337,24 +2120,24 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv))
- vlv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
+ else if (display->platform.valleyview)
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
intel_encoders_post_pll_disable(state, crtc);
- if (DISPLAY_VER(dev_priv) != 2)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ if (DISPLAY_VER(display) != 2)
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
- if (!dev_priv->display.funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ if (!display->funcs.wm->initial_watermarks)
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
i830_enable_pipe(display, pipe);
}
@@ -2368,11 +2151,11 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
- const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
/* GDG double wide on either pipe, otherwise pipe A only */
- return HAS_DOUBLE_WIDE(dev_priv) &&
- (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+ return HAS_DOUBLE_WIDE(display) &&
+ (crtc->pipe == PIPE_A || display->platform.i915g);
}
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
@@ -2419,9 +2202,9 @@ static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
crtc_state->hw.pipe_mode.crtc_clock;
@@ -2532,8 +2315,8 @@ static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2545,15 +2328,15 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
*/
if (drm_rect_width(&crtc_state->pipe_src) & 1) {
if (crtc_state->double_wide) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
- drm_dbg_kms(&i915->drm,
+ intel_is_dual_link_lvds(display)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -2565,11 +2348,11 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->display.cdclk.max_dotclk_freq;
+ int clock_limit = display->cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2584,8 +2367,8 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_joiner_adjust_timings(crtc_state, pipe_mode);
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
+ if (DISPLAY_VER(display) < 4) {
+ clock_limit = display->cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2593,13 +2376,13 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->display.cdclk.max_dotclk_freq;
+ clock_limit = display->cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
if (pipe_mode->crtc_clock > clock_limit) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
crtc->base.base.id, crtc->base.name,
pipe_mode->crtc_clock, clock_limit,
@@ -2610,33 +2393,63 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state)
+static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int set_context_latency = 0;
+
+ if (!HAS_DSB(display))
+ return 0;
+
+ set_context_latency = max(set_context_latency,
+ intel_psr_min_set_context_latency(crtc_state));
- return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay &&
- IS_DISPLAY_VER(display, 13, 14);
+ return set_context_latency;
}
-static int intel_crtc_compute_config(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- int ret;
+ int set_context_latency, max_vblank_delay;
+
+ set_context_latency = intel_crtc_set_context_latency(crtc_state);
+
+ max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- adjusted_mode->crtc_vblank_start += 1;
+ if (set_context_latency > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name,
+ set_context_latency,
+ max_vblank_delay);
+ return -EINVAL;
+ }
+
+ crtc_state->set_context_latency = set_context_latency;
+ adjusted_mode->crtc_vblank_start += set_context_latency;
+
+ return 0;
+}
+
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
return ret;
+ ret = intel_crtc_compute_set_context_latency(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_crtc_compute_pipe_src(crtc_state);
if (ret)
return ret;
@@ -2650,6 +2463,8 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
if (crtc_state->has_pch_encoder)
return ilk_fdi_compute_config(crtc, crtc_state);
+ intel_vrr_compute_guardband(crtc_state);
+
return 0;
}
@@ -2703,7 +2518,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
0x80000);
}
-void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+void intel_panel_sanitize_ssc(struct intel_display *display)
{
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -2711,17 +2526,17 @@ void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
+ bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ drm_dbg_kms(display->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
- dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(display->vbt.lvds_use_ssc));
+ display->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -2733,45 +2548,45 @@ void intel_zero_m_n(struct intel_link_m_n *m_n)
m_n->tu = 1;
}
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
- intel_de_write(i915, data_n_reg, m_n->data_n);
- intel_de_write(i915, link_m_reg, m_n->link_m);
+ intel_de_write(display, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
+ intel_de_write(display, data_n_reg, m_n->data_n);
+ intel_de_write(display, link_m_reg, m_n->link_m);
/*
* On BDW+ writing LINK_N arms the double buffered update
* of all the M/N registers, so it must be written last.
*/
- intel_de_write(i915, link_n_reg, m_n->link_n);
+ intel_de_write(display, link_n_reg, m_n->link_n);
}
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder)
{
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return transcoder == TRANSCODER_EDP;
- return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
+ return IS_DISPLAY_VER(display, 5, 7) || display->platform.cherryview;
}
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -2780,28 +2595,39 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
+}
+
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
+
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
@@ -2827,40 +2653,52 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* VBLANK_START no longer works on ADL+, instead we must use
* TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
*/
- if (DISPLAY_VER(dev_priv) >= 13) {
- intel_de_write(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv,
- TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35)
+ intel_de_write(display,
+ TRANS_VSYNCSHIFT(display, cpu_transcoder),
vsyncshift);
- intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
HTOTAL(adjusted_mode->crtc_htotal - 1));
- intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
- intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ /*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not required. Since the support for these bits is going to
+ * be deprecated in upcoming platforms, avoid writing these bits for the
+ * platforms that do not use legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
- intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
@@ -2868,48 +2706,93 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+ if (display->platform.haswell && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
+ intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ if (DISPLAY_VER(display) >= 30) {
+ /*
+ * Address issues for resolutions with high refresh rate that
+ * have small Hblank, specifically where Hblank is smaller than
+ * one MTP. Simulations indicate this will address the
+ * jitter issues that currently cause BS to be immediately
+ * followed by BE which DPRX devices are unable to handle.
+ * https://groups.vesa.org/wg/DP/document/20494
+ */
+ intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
+ crtc_state->min_hblank);
+ }
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
+
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
- drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+ }
+
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
+ crtc_state->set_context_latency);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
+ }
/*
* The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
* But let's write it anyway to keep the state checker happy.
*/
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not required. Since the support for these bits is going to
+ * be deprecated in upcoming platforms, avoid writing these bits for the
+ * platforms that do not use legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ /*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int width = drm_rect_width(&crtc_state->pipe_src);
int height = drm_rect_height(&crtc_state->pipe_src);
enum pipe pipe = crtc->pipe;
@@ -2917,63 +2800,62 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
+ intel_de_write(display, PIPESRC(display, pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35)
return false;
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_HBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_HBLANK(display, cpu_transcoder));
adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder));
adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
- tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
/* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_VBLANK(display, cpu_transcoder));
adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder));
adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
@@ -2983,11 +2865,28 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
+ pipe_config->set_context_latency =
+ intel_de_read(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
adjusted_mode->crtc_vblank_start =
adjusted_mode->crtc_vdisplay +
- intel_de_read(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
+ pipe_config->set_context_latency;
+ } else if (DISPLAY_VER(display) == 12) {
+ /*
+ * TGL doesn't have a dedicated register for SCL.
+ * Instead, the hardware derives SCL from the difference between
+ * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
+ * To reflect the HW behaviour, readout the value for SCL as
+ * Vblank start - Vactive.
+ */
+ pipe_config->set_context_latency =
+ adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ }
+
+ if (DISPLAY_VER(display) >= 30)
+ pipe_config->min_hblank = intel_de_read(display,
+ DP_MIN_HBLANK_CTL(cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3010,11 +2909,10 @@ static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display, PIPESRC(display, crtc->pipe));
drm_rect_init(&pipe_config->pipe_src, 0, 0,
REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
@@ -3025,8 +2923,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3035,15 +2932,15 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During modeset the pipe is still disabled and must remain so
* - During fastset the pipe is already enabled and must remain so
*/
- if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
+ if (display->platform.i830 || !intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
val |= TRANSCONF_DITHER_EN |
@@ -3067,7 +2964,7 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (DISPLAY_VER(dev_priv) < 4 ||
+ if (DISPLAY_VER(display) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -3076,8 +2973,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- crtc_state->limited_color_range)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ crtc_state->limited_color_range)
val |= TRANSCONF_COLOR_RANGE_SELECT;
val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
@@ -3087,54 +2984,17 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
-}
-
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return false;
-
- return DISPLAY_VER(dev_priv) >= 4 ||
- IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe;
- u32 tmp;
-
- if (!i9xx_has_pfit(dev_priv))
- return;
-
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
- if (!(tmp & PFIT_ENABLE))
- return;
-
- /* Check whether the pfit is attached to our pipe. */
- if (DISPLAY_VER(dev_priv) >= 4)
- pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
- else
- pipe = PIPE_B;
-
- if (pipe != crtc->pipe)
- return;
-
- crtc_state->gmch_pfit.control = tmp;
- crtc_state->gmch_pfit.pgm_ratios =
- intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
/*
@@ -3142,8 +3002,8 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
* For xe3_lpd+ this is implied in YUV420 Enable bit.
* Ensure the same for prior platforms in YUV420 Mode bit.
*/
- if (DISPLAY_VER(dev_priv) < 30)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 30)
+ drm_WARN_ON(display->drm,
(tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
@@ -3157,31 +3017,29 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->sink_format = pipe_config->output_format;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
-
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->sink_format = pipe_config->output_format;
+
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3198,7 +3056,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
@@ -3206,29 +3064,29 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_WGC_ENABLE))
pipe_config->wgc_enable = true;
intel_color_get_config(pipe_config);
- if (HAS_DOUBLE_WIDE(dev_priv))
+ if (HAS_DOUBLE_WIDE(display))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- i9xx_get_pfit_config(pipe_config);
+ i9xx_pfit_get_config(pipe_config);
i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -3240,9 +3098,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_crtc_clock_get(pipe_config);
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
vlv_crtc_clock_get(pipe_config);
else
i9xx_crtc_clock_get(pipe_config);
@@ -3258,15 +3116,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3308,7 +3165,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range &&
@@ -3323,14 +3180,13 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3341,28 +3197,29 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
if (!intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
- if (IS_HASWELL(dev_priv) && crtc_state->dither)
+ if (display->platform.haswell && crtc_state->dither)
val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
- if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= TRANSCONF_INTERLACE_IF_ID_ILK;
- else
- val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ if (DISPLAY_VER(display) < 35) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
+ else
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ }
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc->base.dev);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
switch (crtc_state->pipe_bpp) {
@@ -3377,7 +3234,7 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3396,14 +3253,14 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
+ if (DISPLAY_VER(display) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
/* allow PSR with sprite enabled */
- if (IS_BROADWELL(dev_priv))
+ if (display->platform.broadwell)
val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
@@ -3411,10 +3268,10 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
switch (tmp & PIPE_MISC_BPC_MASK) {
case PIPE_MISC_BPC_6:
@@ -3434,7 +3291,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 36;
fallthrough;
default:
@@ -3454,33 +3311,33 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
- m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
- m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
- m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
- m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
+ m_n->link_m = intel_de_read(display, link_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->link_n = intel_de_read(display, link_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_m = intel_de_read(display, data_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_n = intel_de_read(display, data_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(display, data_m_reg)) + 1;
}
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -3489,77 +3346,39 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
- return;
-
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
-}
-
-static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 ctl, pos, size;
- enum pipe pipe;
+ struct intel_display *display = to_intel_display(crtc);
- ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
- if ((ctl & PF_ENABLE) == 0)
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
- else
- pipe = crtc->pipe;
-
- crtc_state->pch_pfit.enabled = true;
-
- pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
- size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
-
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
-
- /*
- * We currently do not free assignements of panel fitters on
- * ivb/hsw (since we don't use the higher upscaling modes which
- * differentiates them) so just WARN about this case for now.
- */
- drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3607,31 +3426,31 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
-static u8 joiner_pipes(struct drm_i915_private *i915)
+static u8 joiner_pipes(struct intel_display *display)
{
u8 pipes;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
pipes = BIT(PIPE_B) | BIT(PIPE_C);
else
pipes = 0;
- return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
+ return pipes & DISPLAY_RUNTIME_INFO(display)->pipe_mask;
}
-static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+static bool transcoder_ddi_func_is_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
enum intel_display_power_domain power_domain;
@@ -3640,9 +3459,9 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
return tmp & TRANS_DDI_FUNC_ENABLE;
}
@@ -3650,7 +3469,6 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3659,14 +3477,14 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
if (!HAS_UNCOMPRESSED_JOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
@@ -3680,7 +3498,6 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
static void enabled_bigjoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3689,14 +3506,14 @@ static void enabled_bigjoiner_pipes(struct intel_display *display,
if (!HAS_BIGJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & BIG_JOINER_ENABLE))
@@ -3748,10 +3565,9 @@ static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
}
-static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
+static void enabled_ultrajoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3760,15 +3576,15 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
if (!HAS_ULTRAJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
- u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
+ u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & ULTRA_JOINER_ENABLE))
continue;
@@ -3781,11 +3597,10 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
}
}
-static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
+static void enabled_joiner_pipes(struct intel_display *display,
enum pipe pipe,
u8 *primary_pipe, u8 *secondary_pipes)
{
- struct intel_display *display = to_intel_display(&dev_priv->drm);
u8 primary_ultrajoiner_pipes;
u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
u8 secondary_ultrajoiner_pipes;
@@ -3793,21 +3608,21 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
u8 ultrajoiner_pipes;
u8 uncompressed_joiner_pipes, bigjoiner_pipes;
- enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
+ enabled_ultrajoiner_pipes(display, &primary_ultrajoiner_pipes,
&secondary_ultrajoiner_pipes);
/*
* For some strange reason the last pipe in the set of four
* shouldn't have ultrajoiner enable bit set in hardware.
* Set the bit anyway to make life easier.
*/
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
expected_secondary_pipes(primary_ultrajoiner_pipes, 3) !=
secondary_ultrajoiner_pipes);
secondary_ultrajoiner_pipes =
fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes,
secondary_ultrajoiner_pipes);
- drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
+ drm_WARN_ON(display->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes,
&secondary_uncompressed_joiner_pipes);
@@ -3901,11 +3716,11 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
}
}
-static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+static u8 hsw_panel_transcoders(struct intel_display *display)
{
u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
return panel_transcoder_mask;
@@ -3913,9 +3728,8 @@ static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ struct intel_display *display = to_intel_display(crtc);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(display);
enum transcoder cpu_transcoder;
u8 primary_pipe, secondary_pipes;
u8 enabled_transcoders = 0;
@@ -3924,7 +3738,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
+ for_each_cpu_transcoder_masked(display, cpu_transcoder,
panel_transcoder_mask) {
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -3932,16 +3746,16 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
u32 tmp = 0;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- drm_WARN(dev, 1,
+ drm_WARN(display->drm, 1,
"unknown pipe linked to transcoder %s\n",
transcoder_name(cpu_transcoder));
fallthrough;
@@ -3966,14 +3780,14 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
/* single pipe or joiner primary */
cpu_transcoder = (enum transcoder) crtc->pipe;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
/* joiner secondary -> consider the primary pipe's transcoder as well */
- enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, crtc->pipe, &primary_pipe, &secondary_pipes);
if (secondary_pipes & BIT(crtc->pipe)) {
cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -3998,17 +3812,17 @@ static bool has_pipe_transcoders(u8 enabled_transcoders)
BIT(TRANSCODER_DSI_1));
}
-static void assert_enabled_transcoders(struct drm_i915_private *i915,
+static void assert_enabled_transcoders(struct intel_display *display,
u8 enabled_transcoders)
{
/* Only one type of transcoder please */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
has_edp_transcoders(enabled_transcoders) +
has_dsi_transcoders(enabled_transcoders) +
has_pipe_transcoders(enabled_transcoders) > 1);
/* Only DSI transcoders can be ganged */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!has_dsi_transcoders(enabled_transcoders) &&
!is_power_of_2(enabled_transcoders));
}
@@ -4017,8 +3831,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned long enabled_transcoders;
u32 tmp;
@@ -4026,7 +3839,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
if (!enabled_transcoders)
return false;
- assert_enabled_transcoders(dev_priv, enabled_transcoders);
+ assert_enabled_transcoders(display, enabled_transcoders);
/*
* With the exception of DSI we should only ever have
@@ -4035,20 +3848,20 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
*/
pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder));
+ if (hsw_panel_transcoders(display) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
return tmp & TRANSCONF_ENABLE;
}
@@ -4058,7 +3871,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -4069,7 +3881,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
@@ -4080,7 +3892,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -4101,12 +3913,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u8 primary_pipe, secondary_pipes;
enum pipe pipe = crtc->pipe;
- enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, pipe, &primary_pipe, &secondary_pipes);
if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
return;
@@ -4118,21 +3930,18 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
+ if (!intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- pipe_config->shared_dpll = NULL;
-
active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
- drm_WARN_ON(&dev_priv->drm, active);
+ drm_WARN_ON(display->drm, active);
active = true;
}
@@ -4142,18 +3951,32 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_joiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
+ /* intel_vrr_get_config() depends on .framestart_delay */
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
+ /*
+ * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
+ * readout done by intel_get_transcoder_timings().
+ */
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- DISPLAY_VER(dev_priv) >= 11)
+ DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- if (IS_HASWELL(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ if (display->platform.haswell) {
+ u32 tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -4168,18 +3991,18 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ tmp = intel_de_read(display, WM_LINETIME(crtc->pipe));
pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
+ if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_get_config(pipe_config);
else
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
}
hsw_ips_get_config(pipe_config);
@@ -4187,33 +4010,24 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- intel_de_read(dev_priv,
- TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(display,
+ TRANS_MULT(display, pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
-
- pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
- } else {
- /* no idea if this is correct */
- pipe_config->framestart_delay = 1;
- }
-
out:
- intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
+ intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);
return active;
}
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
+ if (!display->funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4229,7 +4043,7 @@ int intel_dotclock_calculate(int link_freq,
/*
* The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
- * But we want to avoid losing precison if possible, so:
+ * But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and for link freq (10kbs units) -> pixel clock it is:
@@ -4339,137 +4153,6 @@ static bool check_single_encoder_cloning(struct intel_atomic_state *state,
return true;
}
-static int icl_add_linked_planes(struct intel_atomic_state *state)
-{
- struct intel_plane *plane, *linked;
- struct intel_plane_state *plane_state, *linked_plane_state;
- int i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->planar_linked_plane;
-
- if (!linked)
- continue;
-
- linked_plane_state = intel_atomic_get_plane_state(state, linked);
- if (IS_ERR(linked_plane_state))
- return PTR_ERR(linked_plane_state);
-
- drm_WARN_ON(state->base.dev,
- linked_plane_state->planar_linked_plane != plane);
- drm_WARN_ON(state->base.dev,
- linked_plane_state->planar_slave == plane_state->planar_slave);
- }
-
- return 0;
-}
-
-static int icl_check_nv12_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane *plane, *linked;
- struct intel_plane_state *plane_state;
- int i;
-
- if (DISPLAY_VER(dev_priv) < 11)
- return 0;
-
- /*
- * Destroy all old plane links and make the slave plane invisible
- * in the crtc_state->active_planes mask.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
- continue;
-
- plane_state->planar_linked_plane = NULL;
- if (plane_state->planar_slave && !plane_state->uapi.visible) {
- crtc_state->enabled_planes &= ~BIT(plane->id);
- crtc_state->active_planes &= ~BIT(plane->id);
- crtc_state->update_planes |= BIT(plane->id);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->rel_data_rate[plane->id] = 0;
- }
-
- plane_state->planar_slave = false;
- }
-
- if (!crtc_state->nv12_planes)
- return 0;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *linked_state = NULL;
-
- if (plane->pipe != crtc->pipe ||
- !(crtc_state->nv12_planes & BIT(plane->id)))
- continue;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
- if (!icl_is_nv12_y_plane(dev_priv, linked->id))
- continue;
-
- if (crtc_state->active_planes & BIT(linked->id))
- continue;
-
- linked_state = intel_atomic_get_plane_state(state, linked);
- if (IS_ERR(linked_state))
- return PTR_ERR(linked_state);
-
- break;
- }
-
- if (!linked_state) {
- drm_dbg_kms(&dev_priv->drm,
- "Need %d free Y planes for planar YUV\n",
- hweight8(crtc_state->nv12_planes));
-
- return -EINVAL;
- }
-
- plane_state->planar_linked_plane = linked;
-
- linked_state->planar_slave = true;
- linked_state->planar_linked_plane = plane;
- crtc_state->enabled_planes |= BIT(linked->id);
- crtc_state->active_planes |= BIT(linked->id);
- crtc_state->update_planes |= BIT(linked->id);
- crtc_state->data_rate[linked->id] =
- crtc_state->data_rate_y[plane->id];
- crtc_state->rel_data_rate[linked->id] =
- crtc_state->rel_data_rate_y[plane->id];
- drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
- linked->base.name, plane->base.name);
-
- /* Copy parameters to slave plane */
- linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
- linked_state->color_ctl = plane_state->color_ctl;
- linked_state->view = plane_state->view;
- linked_state->decrypt = plane_state->decrypt;
-
- intel_plane_copy_hw_state(linked_state, plane_state);
- linked_state->uapi.src = plane_state->uapi.src;
- linked_state->uapi.dst = plane_state->uapi.dst;
-
- if (icl_is_hdr_plane(dev_priv, plane->id)) {
- if (linked->id == PLANE_7)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
- else if (linked->id == PLANE_6)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
- else if (linked->id == PLANE_5)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
- else if (linked->id == PLANE_4)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
- else
- MISSING_CASE(linked->id);
- }
- }
-
- return 0;
-}
-
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *pipe_mode =
@@ -4496,15 +4179,14 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
return 0;
linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
- cdclk_state->logical.cdclk);
+ intel_cdclk_logical(cdclk_state));
return min(linetime_wm, 0x1ff);
}
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4516,8 +4198,8 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
crtc_state->pixel_rate);
/* Display WA #1135: BXT:ALL GLK:ALL */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- skl_watermark_ipc_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -4526,12 +4208,12 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_cdclk_state *cdclk_state;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
crtc_state->linetime = skl_linetime_wm(crtc_state);
else
crtc_state->linetime = hsw_linetime_wm(crtc_state);
@@ -4553,18 +4235,17 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x &&
intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->hw.active)
crtc_state->update_wm_post = true;
if (intel_crtc_needs_modeset(crtc_state)) {
- ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
+ ret = intel_dpll_crtc_get_dpll(state, crtc);
if (ret)
return ret;
}
@@ -4575,15 +4256,20 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
ret = intel_wm_compute(state, crtc);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] watermarks are invalid\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9) {
+ ret = intel_casf_compute_config(crtc_state);
+ if (ret)
+ return ret;
+
+ if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
- intel_crtc_needs_fastset(crtc_state)) {
+ intel_crtc_needs_fastset(crtc_state) ||
+ intel_casf_needs_scaler(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
return ret;
@@ -4600,8 +4286,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
ret = hsw_compute_linetime_wm(state, crtc);
if (ret)
return ret;
@@ -4619,8 +4305,8 @@ static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -4643,7 +4329,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < crtc_state->pipe_bpp) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Limiting display bpp to %d "
"(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
@@ -4657,26 +4343,34 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return 0;
}
+int intel_display_min_pipe_bpp(void)
+{
+ return 6 * 3;
+}
+
+int intel_display_max_pipe_bpp(struct intel_display *display)
+{
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
+ return 10*3;
+ else if (DISPLAY_VER(display) >= 5)
+ return 12*3;
+ else
+ return 8*3;
+}
+
static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int bpp, i;
-
- if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)))
- bpp = 10*3;
- else if (DISPLAY_VER(dev_priv) >= 5)
- bpp = 12*3;
- else
- bpp = 8*3;
+ int i;
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display);
/* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
@@ -4695,7 +4389,7 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
@@ -4706,14 +4400,14 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
* We're going to peek into connector->state,
* hence connection_mutex must be held.
*/
- drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
@@ -4729,11 +4423,11 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- drm_WARN_ON(dev, !connector_state->crtc);
+ drm_WARN_ON(display->drm, !connector_state->crtc);
switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display)))
break;
fallthrough;
case INTEL_OUTPUT_DP:
@@ -4840,7 +4534,7 @@ copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
/* preserve some things from the slave's original crtc state */
saved_state->uapi = secondary_crtc_state->uapi;
saved_state->scaler_state = secondary_crtc_state->scaler_state;
- saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
+ saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
intel_crtc_free_hw_state(secondary_crtc_state);
@@ -4881,9 +4575,9 @@ static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
saved_state = intel_crtc_state_alloc(crtc);
@@ -4903,13 +4597,13 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->intel_dpll = crtc_state->intel_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
saved_state->wm = crtc_state->wm;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
@@ -4925,7 +4619,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
struct intel_crtc *crtc,
const struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
@@ -4954,11 +4648,11 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
+ crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
crtc->base.base.id, crtc->base.name,
FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
@@ -4988,7 +4682,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
continue;
if (!check_single_encoder_cloning(state, crtc, encoder)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
encoder->base.base.id, encoder->base.name);
return -EINVAL;
@@ -5030,7 +4724,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] config failure: %d\n",
encoder->base.base.id, encoder->base.name, ret);
return ret;
}
@@ -5046,7 +4740,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] config failure: %d\n",
crtc->base.base.id, crtc->base.name, ret);
return ret;
}
@@ -5057,7 +4751,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
*/
crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
!crtc_state->dither_force_disable;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
crtc->base.base.id, crtc->base.name,
base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
@@ -5075,8 +4769,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_vrr_compute_config_late(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -5189,7 +4881,7 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const char *loglevel;
if (fastset) {
@@ -5204,9 +4896,9 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
drm_printf(p, "expected:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ hdmi_infoframe_log(loglevel, display->drm->dev, a);
drm_printf(p, "found:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, b);
+ hdmi_infoframe_log(loglevel, display->drm->dev, b);
}
static void
@@ -5275,14 +4967,14 @@ pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
drm_printf(p, "expected:\n");
- intel_dpll_dump_hw_state(i915, p, a);
+ intel_dpll_dump_hw_state(display, p, a);
drm_printf(p, "found:\n");
- intel_dpll_dump_hw_state(i915, p, b);
+ intel_dpll_dump_hw_state(display, p, b);
}
static void
@@ -5303,21 +4995,58 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
intel_cx0pll_dump_hw_state(display, b);
}
+static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ /*
+ * Allow fastboot to fix up vblank delay (handled via LRR
+ * codepaths), a bit dodgy as the registers aren't
+ * double buffered, but it seems to be working more or less...
+ *
+ * Also allow this when the VRR timing generator is always on,
+ * and the optimized guardband is used. In such cases, the
+ * vblank delay may vary even without inherited state, but it's
+ * still safe as the VRR guardband stays the same.
+ */
+ return HAS_LRR(display) &&
+ (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
+static void
+pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
+ const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ char *chipname = "LTPHY";
+
+ pipe_config_mismatch(p, fastset, crtc, name, chipname);
+
+ drm_printf(p, "expected:\n");
+ intel_lt_phy_dump_hw_state(display, a);
+ drm_printf(p, "found:\n");
+ intel_lt_phy_dump_hw_state(display, b);
+}
+
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
struct intel_display *display = to_intel_display(current_config);
- struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
+ u32 exclude_infoframes = 0;
bool ret = true;
if (fastset)
- p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
else
- p = drm_err_printer(&dev_priv->drm, NULL);
+ p = drm_err_printer(display->drm, NULL);
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
@@ -5408,7 +5137,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} while (0)
#define PIPE_CONF_CHECK_PLL(name) do { \
- if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
+ if (!intel_dpll_compare_hw_state(display, &current_config->name, \
&pipe_config->name)) { \
pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
@@ -5427,6 +5156,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_PLL_LT(name) do { \
+ if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->name, \
+ &pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
@@ -5435,7 +5174,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
- PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ if (!fastset || !allow_vblank_delay_fastset(current_config)) \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
if (!fastset || !pipe_config->update_lrr) { \
@@ -5549,6 +5289,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+ PIPE_CONF_CHECK_I(min_hblank);
+
if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5583,8 +5325,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
- if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if ((DISPLAY_VER(display) < 8 && !display->platform.haswell) ||
+ display->platform.valleyview || display->platform.cherryview)
PIPE_CONF_CHECK_BOOL(limited_color_range);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
@@ -5600,7 +5342,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (DISPLAY_VER(dev_priv) < 4)
+ if (DISPLAY_VER(display) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -5618,9 +5360,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_I(pixel_rate);
+ PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable);
+ PIPE_CONF_CHECK_I(hw.casf_params.win_size);
+ PIPE_CONF_CHECK_I(hw.casf_params.strength);
PIPE_CONF_CHECK_X(gamma_mode);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
PIPE_CONF_CHECK_X(cgm_mode);
else
PIPE_CONF_CHECK_X(csc_mode);
@@ -5638,37 +5383,25 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
- /*
- * Panel replay has to be enabled before link training. PSR doesn't have
- * this requirement -> check these only if using panel replay
- */
- if (current_config->active_planes &&
- (current_config->has_panel_replay ||
- pipe_config->has_panel_replay)) {
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_sel_update);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
- PIPE_CONF_CHECK_BOOL(has_panel_replay);
- }
-
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->display.dpll.mgr)
- PIPE_CONF_CHECK_P(shared_dpll);
+ if (display->dpll.mgr)
+ PIPE_CONF_CHECK_P(intel_dpll);
/* FIXME convert everything over the dpll_mgr */
- if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
+ if (display->dpll.mgr || HAS_GMCH(display))
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (HAS_LT_PHY(display))
+ PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
+ else if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ if (display->platform.g4x || DISPLAY_VER(display) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
if (!fastset || !pipe_config->update_m_n) {
@@ -5680,19 +5413,21 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
if (current_config->has_psr || pipe_config->has_psr)
- PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
- ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
- else
- PIPE_CONF_CHECK_X(infoframes.enable);
+ exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+
+ if (current_config->vrr.enable || pipe_config->vrr.enable)
+ exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+ PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~exclude_infoframes);
PIPE_CONF_CHECK_X(infoframes.gcp);
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
- if (!fastset)
+ if (!fastset) {
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
+ }
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
- PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -5742,8 +5477,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5751,6 +5484,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
+ PIPE_CONF_CHECK_I(set_context_latency);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -5774,7 +5514,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->planar_slave ||
+ assert_plane(plane, plane_state->is_y_plane ||
plane_state->uapi.visible);
}
@@ -5782,11 +5522,11 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state,
const char *reason)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
crtc->base.base.id, crtc->base.name, reason);
ret = drm_atomic_add_affected_connectors(&state->base,
@@ -5802,7 +5542,7 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
@@ -5826,10 +5566,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 mask)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mask) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5873,10 +5613,10 @@ intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5902,7 +5642,7 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
return 0;
}
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -5910,14 +5650,14 @@ int intel_modeset_commit_pipes(struct drm_i915_private *i915,
struct intel_crtc *crtc;
int ret;
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
to_intel_atomic_state(state)->internal = true;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -5997,6 +5737,23 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.enable)
+ enabled_pipes |= BIT(crtc->pipe);
+ else
+ enabled_pipes &= ~BIT(crtc->pipe);
+ }
+
+ return enabled_pipes;
+}
+
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes)
{
@@ -6016,38 +5773,52 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
static int intel_modeset_checks(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
state->modeset = true;
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return hsw_mode_set_planes_workaround(state);
return 0;
}
+static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
+ const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+
+ return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
+ old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
+ old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
+}
+
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
- if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
crtc->base.base.id, crtc->base.name);
- else
+ } else {
+ if (allow_vblank_delay_fastset(old_crtc_state))
+ new_crtc_state->update_lrr = true;
new_crtc_state->uapi.mode_changed = false;
+ }
if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
- old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
+ if (!lrr_params_changed(old_crtc_state, new_crtc_state))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -6056,161 +5827,19 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->update_pipe = true;
}
-static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- u8 plane_ids_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
-
- if ((plane_ids_mask & BIT(plane->id)) == 0)
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- return intel_crtc_add_planes_to_state(state, crtc,
- old_crtc_state->enabled_planes |
- new_crtc_state->enabled_planes);
-}
-
-static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
-{
- /* See {hsw,vlv,ivb}_plane_ratio() */
- return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv);
-}
-
-static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc *other)
-{
- const struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
- u8 plane_ids = 0;
- int i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe == crtc->pipe)
- plane_ids |= BIT(plane->id);
- }
-
- return intel_crtc_add_planes_to_state(state, other, plane_ids);
-}
-
-static int intel_joiner_add_affected_planes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_crtc *other;
-
- for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
- crtc_state->joiner_pipes) {
- int ret;
-
- if (crtc == other)
- continue;
-
- ret = intel_crtc_add_joiner_planes(state, crtc, other);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int intel_atomic_check_planes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- int i, ret;
-
- ret = icl_add_linked_planes(state);
- if (ret)
- return ret;
-
- ret = intel_joiner_add_affected_planes(state);
- if (ret)
- return ret;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_atomic_check(state, plane);
- if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
- "[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.base.id, plane->base.name);
- return ret;
- }
- }
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- u8 old_active_planes, new_active_planes;
-
- ret = icl_check_nv12_planes(state, crtc);
- if (ret)
- return ret;
-
- /*
- * On some platforms the number of active planes affects
- * the planes' minimum cdclk calculation. Add such planes
- * to the state before we compute the minimum cdclk.
- */
- if (!active_planes_affects_min_cdclk(dev_priv))
- continue;
-
- old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-
- if (hweight8(old_active_planes) == hweight8(new_active_planes))
- continue;
-
- ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state __maybe_unused *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int ret;
ret = intel_crtc_atomic_check(state, crtc);
if (ret) {
- drm_dbg_atomic(&i915->drm,
+ drm_dbg_atomic(display->drm,
"[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.base.id, crtc->base.name);
return ret;
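The conversion running through this file follows one mechanical pattern: the per-function struct drm_i915_private pointer is dropped in favor of struct intel_display obtained via to_intel_display(), and logging/WARN helpers switch from &i915->drm to display->drm. In sketch form (illustrative only, identifiers taken from the hunks in this patch):

	/* before */
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] ...\n", crtc->base.base.id, crtc->base.name);

	/* after */
	struct intel_display *display = to_intel_display(state);
	drm_dbg_kms(display->drm, "[CRTC:%d:%s] ...\n", crtc->base.base.id, crtc->base.name);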
@@ -6257,7 +5886,7 @@ static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
static int intel_atomic_check_joiner(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
@@ -6266,20 +5895,20 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
return 0;
/* sanity check */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
return -EINVAL;
- if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (primary_crtc_state->joiner_pipes & ~joiner_pipes(display)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Cannot act as joiner primary "
"(need 0x%x as pipes, only 0x%x possible)\n",
primary_crtc->base.base.id, primary_crtc->base.name,
- primary_crtc_state->joiner_pipes, joiner_pipes(i915));
+ primary_crtc_state->joiner_pipes, joiner_pipes(display));
return -EINVAL;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
int ret;
@@ -6290,7 +5919,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
/* primary being enabled, secondary was already configured? */
if (secondary_crtc_state->uapi.enable) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
"[CRTC:%d:%s] claiming this CRTC for joiner.\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
@@ -6309,7 +5938,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
drm_crtc_index(&secondary_crtc->base)))
return -EINVAL;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
primary_crtc->base.base.id, primary_crtc->base.name);
@@ -6328,12 +5957,12 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
static void kill_joiner_secondaries(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state =
intel_atomic_get_new_crtc_state(state, secondary_crtc);
@@ -6359,7 +5988,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
* the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
- * generated and the requested events are sent to the usersapce in the interrupt
+ * generated and the requested events are sent to the userspace in the interrupt
* handler itself. The timestamp and sequence sent during the flip done event
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
@@ -6367,7 +5996,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *old_plane_state;
@@ -6379,14 +6008,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return 0;
if (!new_crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6397,12 +6026,20 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* Remove this check once the issues are fixed.
*/
if (new_crtc_state->joiner_pipes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] async flip disallowed with joiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
+ /* FIXME: selective fetch should be disabled for async flips */
+ if (new_crtc_state->enable_psr2_sel_fetch) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] async flip disallowed with PSR2 selective fetch\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
@@ -6416,14 +6053,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* enabled in the atomic IOCTL path.
*/
if (!plane->async_flip) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] async flip not supported\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no old or new framebuffer\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6435,7 +6072,7 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
const struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
@@ -6448,21 +6085,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
return 0;
if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Active planes cannot be in async flip\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6478,7 +6115,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
* if we're really about to ask the hardware to perform
* an async flip. We should never get this far otherwise.
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_crtc_state->do_async_flip && !plane->async_flip))
return -EINVAL;
@@ -6493,50 +6130,16 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- /*
- * FIXME: This check is kept generic for all platforms.
- * Need to verify this for all gen9 platforms to enable
- * this selectively if required.
- */
- switch (new_plane_state->hw.fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- /*
- * FIXME: Async on Linear buffer is supported on ICL as
- * but with additional alignment and fbc restrictions
- * need to be taken care of. These aren't applicable for
- * gen12+.
- */
- if (DISPLAY_VER(i915) < 12) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
- plane->base.base.id, plane->base.name,
- new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
- return -EINVAL;
- }
- break;
-
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_4_TILED:
- case I915_FORMAT_MOD_4_TILED_BMG_CCS:
- case I915_FORMAT_MOD_4_TILED_LNL_CCS:
- break;
- default:
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
+ new_plane_state->hw.fb->modifier)) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
+ &new_plane_state->hw.fb->format->format,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (new_plane_state->hw.fb->format->num_planes > 1) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Planar formats do not support async flips\n",
- plane->base.base.id, plane->base.name);
- return -EINVAL;
- }
-
/*
* We turn the first async flip request into a sync flip
* so that we can reconfigure the plane (eg. change modifier).
@@ -6546,7 +6149,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->view.color_plane[0].mapping_stride !=
new_plane_state->view.color_plane[0].mapping_stride) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Stride cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6554,7 +6157,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->modifier !=
new_plane_state->hw.fb->modifier) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6562,7 +6165,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->format !=
new_plane_state->hw.fb->format) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6570,22 +6173,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.rotation !=
new_plane_state->hw.rotation) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
+ if (skl_plane_aux_dist(old_plane_state, 0) !=
+ skl_plane_aux_dist(new_plane_state, 0)) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6593,21 +6204,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.pixel_blend_mode !=
new_plane_state->hw.pixel_blend_mode) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color range cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6615,7 +6226,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
/* plane decryption is allow to change only in synchronous flips */
if (old_plane_state->decrypt != new_plane_state->decrypt) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
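The per-modifier switch and the separate planar-format rejection removed above are now condensed into a single intel_plane_can_async_flip(plane, fourcc, modifier) call; that helper lives in the plane code and presumably encodes per-plane, per-platform support. A rough sketch of what the removed open-coded checks enforced (simplified signature, not the real helper):

static bool async_flip_fb_ok_sketch(struct intel_display *display,
				    const struct drm_framebuffer *fb)
{
	/* planar formats were rejected outright by the old check */
	if (fb->format->num_planes > 1)
		return false;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* linear async flips were only accepted on display ver 12+ */
		return DISPLAY_VER(display) >= 12;
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
	case I915_FORMAT_MOD_4_TILED:
	case I915_FORMAT_MOD_4_TILED_BMG_CCS:
	case I915_FORMAT_MOD_4_TILED_LNL_CCS:
		return true;
	default:
		return false;
	}
}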
@@ -6627,26 +6238,44 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
+ struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
+ /*
+ * Any plane which is in use by the joiner needs its crtc.
+ * Pull those in first as this will not have happened yet
+ * if the plane remains disabled according to uapi.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ crtc = to_intel_crtc(plane_state->hw.crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ /* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->joiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))
modeset_pipes |= crtc_state->joiner_pipes;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, affected_pipes) {
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, modeset_pipes) {
int ret;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -6657,7 +6286,7 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
}
@@ -6676,7 +6305,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
enum pipe *failed_pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret;
@@ -6701,7 +6330,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
ret = intel_crtc_prepare_cleared_state(state, crtc);
@@ -6720,7 +6349,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
if (!new_crtc_state->hw.enable)
@@ -6785,12 +6414,10 @@ int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = false;
if (!intel_display_driver_check_access(display))
return -ENODEV;
@@ -6833,7 +6460,7 @@ int intel_atomic_check(struct drm_device *dev,
continue;
if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
- drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ drm_WARN_ON(display->drm, new_crtc_state->uapi.enable);
continue;
}
@@ -6898,22 +6525,22 @@ int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- any_ms = true;
-
- intel_release_shared_dplls(state, crtc);
+ intel_dpll_release(state, crtc);
}
- if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(&dev_priv->drm,
- "rejecting conflicting digital port configuration\n");
+ if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) {
+ drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
- ret = intel_atomic_check_planes(state);
+ ret = intel_plane_atomic_check(state);
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state);
+
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
@@ -6922,21 +6549,14 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_cdclk_atomic_check(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state);
if (ret)
goto fail;
- if (intel_any_crtc_needs_modeset(state))
- any_ms = true;
-
- if (any_ms) {
+ if (intel_any_crtc_needs_modeset(state)) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
-
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
}
ret = intel_pmdemand_atomic_check(state);
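With the local any_ms bookkeeping gone, the same question is asked directly via intel_any_crtc_needs_modeset(state), both for the digital-port-conflict check and for gating intel_modeset_checks(). The helper is defined elsewhere; a sketch of what it presumably reduces to:

static bool any_crtc_needs_modeset_sketch(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (intel_crtc_needs_modeset(crtc_state))
			return true;
	}

	return false;
}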
@@ -6960,7 +6580,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
/* Either full modeset or fastset (or neither), never both */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_crtc_needs_modeset(new_crtc_state) &&
intel_crtc_needs_fastset(new_crtc_state));
@@ -7004,24 +6624,24 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (DISPLAY_VER(display) != 2 || crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
enum pipe pch_transcoder =
intel_crtc_pch_transcoder(crtc);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ intel_set_pch_fifo_underrun_reporting(display, pch_transcoder, true);
}
}
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -7034,10 +6654,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
intel_set_pipe_src_size(new_crtc_state);
/* on skylake this is done by detaching scalers */
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
if (new_crtc_state->pch_pfit.enabled)
ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
@@ -7052,8 +6672,8 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
* HSW/BDW only really need this here for fastboot, after
* that the value should not change without a full modeset.
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
hsw_set_linetime_wm(new_crtc_state);
if (new_crtc_state->update_m_n)
@@ -7067,29 +6687,31 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
+
/*
* During modesets pipe configuration was programmed as the
* CRTC was enabled.
*/
- if (!modeset && !new_crtc_state->use_dsb) {
+ if (!modeset) {
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(NULL, new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
}
- intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+ intel_psr2_program_trans_man_trk_ctl(NULL, new_crtc_state);
intel_atomic_update_watermarks(state, crtc);
}
@@ -7097,18 +6719,26 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
- !intel_crtc_needs_modeset(new_crtc_state))
- skl_detach_scalers(new_crtc_state);
+ if (DISPLAY_VER(display) >= 9 && !modeset)
+ skl_detach_scalers(NULL, new_crtc_state);
+
+ if (!modeset &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
+ !intel_color_uses_dsb(new_crtc_state) &&
+ HAS_DOUBLE_BUFFERED_LUT(display))
+ intel_color_load_luts(new_crtc_state);
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
@@ -7117,7 +6747,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
static void intel_enable_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -7125,7 +6755,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask_reverse(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(new_crtc_state)) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -7134,7 +6764,9 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+ intel_psr_notify_pipe_change(state, crtc, true);
+
+ display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -7143,7 +6775,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
static void intel_pre_update_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -7152,7 +6784,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (old_crtc_state->inherited ||
intel_crtc_needs_modeset(new_crtc_state)) {
- if (HAS_DPT(i915))
+ if (HAS_DPT(display))
intel_dpt_configure(crtc);
}
@@ -7166,7 +6798,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (intel_crtc_needs_fastset(new_crtc_state))
intel_encoders_update_pipe(state, crtc);
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
intel_crtc_needs_fastset(new_crtc_state))
icl_set_pipe_chicken(new_crtc_state);
@@ -7175,16 +6807,21 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
intel_vrr_set_transcoder_timings(new_crtc_state);
}
+ if (intel_casf_enabling(new_crtc_state, old_crtc_state))
+ intel_casf_enable(new_crtc_state);
+ else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
+ intel_casf_update_strength(new_crtc_state);
+
intel_fbc_update(state, crtc);
- drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+ drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state) &&
- !new_crtc_state->use_dsb)
+ !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_color_commit_noarm(NULL, new_crtc_state);
- if (!new_crtc_state->use_dsb)
+ if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_crtc_planes_update_noarm(NULL, state, crtc);
}
@@ -7196,16 +6833,23 @@ static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq) {
+ intel_flipq_enable(new_crtc_state);
+
+ intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);
+
+ intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
+ new_crtc_state->dsb_commit);
+ } else if (new_crtc_state->use_dsb) {
intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
} else {
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
if (new_crtc_state->dsb_commit)
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
commit_pipe_pre_planes(state, crtc);
@@ -7240,7 +6884,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -7249,13 +6893,15 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
- dev_priv->display.funcs.display->crtc_disable(state, crtc);
+ intel_psr_notify_pipe_change(state, crtc, false);
+
+ display->funcs.display->crtc_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state)) {
const struct intel_crtc_state *new_pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -7270,7 +6916,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
u8 disable_pipes = 0;
@@ -7337,7 +6983,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
- drm_WARN_ON(&i915->drm, disable_pipes);
+ drm_WARN_ON(display->drm, disable_pipes);
}
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -7364,7 +7010,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
@@ -7506,8 +7152,9 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
+ drm_WARN_ON(display->drm,
+ skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
@@ -7515,8 +7162,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_update_crtc(state, crtc);
}
- drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- drm_WARN_ON(&dev_priv->drm, update_pipes);
+ drm_WARN_ON(display->drm, modeset_pipes);
+ drm_WARN_ON(display->drm, update_pipes);
}
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
@@ -7524,7 +7171,8 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
- int ret, i;
+ long ret;
+ int i;
for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
if (new_plane_state->fence) {
@@ -7561,7 +7209,7 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, cleanup_work);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -7569,14 +7217,14 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
intel_atomic_dsb_cleanup(old_crtc_state);
- drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_cleanup_planes(display->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_plane *plane;
struct intel_plane_state *plane_state;
int i;
@@ -7613,21 +7261,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
&plane_state->ccval,
sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
}
}
static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- intel_color_prepare_commit(state, crtc);
-}
-
-static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -7638,67 +7279,123 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
return;
/* FIXME deal with everything */
- new_crtc_state->use_dsb =
- new_crtc_state->update_planes &&
- !new_crtc_state->vrr.enable &&
+ new_crtc_state->use_flipq =
+ intel_flipq_supported(display) &&
!new_crtc_state->do_async_flip &&
+ !new_crtc_state->vrr.enable &&
!new_crtc_state->has_psr &&
- !new_crtc_state->scaler_state.scaler_users &&
- !old_crtc_state->scaler_state.scaler_users &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state) &&
+ !intel_crtc_needs_color_update(new_crtc_state);
+
+ new_crtc_state->use_dsb =
+ !new_crtc_state->use_flipq &&
+ !new_crtc_state->do_async_flip &&
+ (DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
!intel_crtc_needs_modeset(new_crtc_state) &&
!intel_crtc_needs_fastset(new_crtc_state);
- if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
+ intel_color_prepare_commit(state, crtc);
+}
+
+static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;
+
+ if (!new_crtc_state->use_flipq &&
+ !new_crtc_state->use_dsb &&
+ !new_crtc_state->dsb_color)
return;
/*
* Rough estimate:
* ~64 registers per each plane * 8 planes = 512
* Double that for pipe stuff and other overhead.
+ * ~4913 registers for 3DLUT
+ * ~200 color registers * 3 HDR planes
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
- new_crtc_state->use_dsb ? 1024 : 16);
+ new_crtc_state->use_dsb ||
+ new_crtc_state->use_flipq ? size : 16);
if (!new_crtc_state->dsb_commit) {
+ new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
intel_color_cleanup_commit(new_crtc_state);
return;
}
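The size estimate in the comment above works out roughly as follows (reading 8192 as that total rounded up is an assumption, not stated in the patch):

	/* base commit buffer: ~64 regs/plane * 8 planes = 512, doubled -> 1024 */
	/* with per-plane color updates: 1024 + 4913 (3DLUT) + 3 * 200 (HDR planes)
	 * ~= 6537, which the 8192-entry allocation comfortably covers */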
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);
+
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state->dsb_commit,
new_crtc_state);
intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
state, crtc);
- intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
+ /*
+ * Ensure we have "Frame Change" event when PSR state is
+ * SRDENT(PSR1) or DEEP_SLEEP(PSR2). Otherwise DSB vblank
+ * evasion hangs as PIPEDSL is reading as 0.
+ */
+ intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
+ state, crtc);
+
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
+ if (new_crtc_state->use_dsb)
+ intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(new_crtc_state->dsb_commit,
new_crtc_state);
bdw_set_pipe_misc(new_crtc_state->dsb_commit,
new_crtc_state);
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state->dsb_commit,
+ new_crtc_state);
intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
state, crtc);
- if (!new_crtc_state->dsb_color_vblank) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
- intel_dsb_interrupt(new_crtc_state->dsb_commit);
- }
+ if (DISPLAY_VER(display) >= 9)
+ skl_detach_scalers(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
}
- if (new_crtc_state->dsb_color_vblank)
+ if (intel_color_uses_chained_dsb(new_crtc_state))
intel_dsb_chain(state, new_crtc_state->dsb_commit,
- new_crtc_state->dsb_color_vblank, true);
+ new_crtc_state->dsb_color, true);
+ else if (intel_color_uses_gosub_dsb(new_crtc_state))
+ intel_dsb_gosub(new_crtc_state->dsb_commit,
+ new_crtc_state->dsb_color);
+
+ if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+ intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
+ intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_dsb_interrupt(new_crtc_state->dsb_commit);
+ }
intel_dsb_finish(new_crtc_state->dsb_commit);
}
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7710,11 +7407,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_atomic_commit_fence_wait(state);
- intel_td_flush(dev_priv);
+ intel_td_flush(display);
intel_atomic_prepare_plane_clear_colors(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_fbc_prepare_dirty_rect(state, crtc);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_atomic_dsb_finish(state, crtc);
drm_atomic_helper_wait_for_dependencies(&state->base);
@@ -7748,7 +7448,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the CSC latched register values with the readout (see
* skl_read_csc() and skl_color_commit_noarm()).
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -7773,13 +7473,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
intel_pmdemand_pre_plane_update(state);
- if (state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
+ if (state->modeset)
+ drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
- intel_set_cdclk_pre_plane_update(state);
+ intel_set_cdclk_pre_plane_update(state);
+ if (state->modeset)
intel_modeset_verify_disabled(state);
- }
intel_sagv_pre_plane_update(state);
@@ -7789,10 +7489,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete events for now disable pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
- spin_lock_irq(&dev->event_lock);
+ spin_lock_irq(&display->drm->event_lock);
drm_crtc_send_vblank_event(&crtc->base,
new_crtc_state->uapi.event);
- spin_unlock_irq(&dev->event_lock);
+ spin_unlock_irq(&display->drm->event_lock);
new_crtc_state->uapi.event = NULL;
}
@@ -7808,13 +7508,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.funcs.display->commit_modeset_enables(state);
+ display->funcs.display->commit_modeset_enables(state);
+ /* FIXME probably need to sequence this properly */
intel_program_dpkgc_latency(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
-
intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7826,13 +7524,19 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need out special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, &state->base);
+ drm_atomic_helper_wait_for_flip_done(display->drm, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
intel_crtc_disable_flip_done(state, crtc);
intel_atomic_dsb_wait_commit(new_crtc_state);
+
+ if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
+ intel_vrr_check_push_sent(NULL, new_crtc_state);
+
+ if (new_crtc_state->use_flipq)
+ intel_flipq_disable(new_crtc_state);
}
/*
@@ -7852,8 +7556,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* chance of catching underruns with the intermediate watermarks
* vs. the new plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (DISPLAY_VER(display) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);
intel_optimize_watermarks(state, crtc);
}
@@ -7876,18 +7580,19 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
}
/* Underruns don't always raise interrupts, so check manually */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
+ intel_check_cpu_fifo_underruns(display);
+ intel_check_pch_fifo_underruns(display);
if (state->modeset)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7906,8 +7611,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* Delay re-enabling DC states by 17 ms to avoid the off->on->off
* toggling overhead at and above 60 FPS.
*/
- intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7918,7 +7623,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* down.
*/
INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
- queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
+ queue_work(display->wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -7967,7 +7672,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
intel_atomic_swap_global_state(state);
- intel_shared_dpll_swap_state(state);
+ intel_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -7977,11 +7682,11 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
+ struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -8000,7 +7705,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
+ if (DISPLAY_VER(display) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -8013,9 +7718,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -8025,7 +7730,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -8033,38 +7738,25 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
if (nonblock && state->modeset) {
- queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
+ queue_work(display->wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
+ queue_work(display->wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->display.wq.modeset);
+ flush_workqueue(display->wq.modeset);
intel_atomic_commit_tail(state);
}
return 0;
}
-/**
- * intel_plane_destroy - destroy a plane
- * @plane: plane to destroy
- *
- * Common destruction function for all types of planes (primary, cursor,
- * sprite).
- */
-void intel_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(to_intel_plane(plane));
-}
-
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *source_encoder;
u32 possible_clones = 0;
- for_each_intel_encoder(dev, source_encoder) {
+ for_each_intel_encoder(display->drm, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
possible_clones |= drm_encoder_mask(&source_encoder->base);
}
@@ -8074,78 +7766,77 @@ static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc;
u32 possible_crtcs = 0;
- for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, encoder->pipe_mask)
possible_crtcs |= drm_crtc_mask(&crtc->base);
return possible_crtcs;
}
-static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct intel_display *display)
{
- if (!IS_MOBILE(dev_priv))
+ if (!display->platform.mobile)
return false;
- if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(display, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (display->platform.ironlake && (intel_de_read(display, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return false;
- if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
- intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ if (HAS_PCH_LPT_H(display) &&
+ intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->display.vbt.int_crt_support)
+ if (!display->vbt.int_crt_support)
return false;
return true;
}
-bool assert_port_valid(struct drm_i915_private *i915, enum port port)
+bool assert_port_valid(struct intel_display *display, enum port port)
{
- return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
+ return !drm_WARN(display->drm, !(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
"Platform does not support port %c\n", port_name(port));
}
-void intel_setup_outputs(struct drm_i915_private *dev_priv)
+void intel_setup_outputs(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
intel_pps_unlock_regs_wa(display);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (HAS_DDI(dev_priv)) {
- if (intel_ddi_crt_present(dev_priv))
+ if (HAS_DDI(display)) {
+ if (intel_ddi_crt_present(display))
intel_crt_init(display);
intel_bios_for_each_encoder(display, intel_ddi_init);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- vlv_dsi_init(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton)
+ vlv_dsi_init(display);
+ } else if (HAS_PCH_SPLIT(display)) {
int found;
/*
@@ -8153,38 +7844,38 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
- if (ilk_has_edp_a(dev_priv))
- g4x_dp_init(dev_priv, DP_A, PORT_A);
+ if (ilk_has_edp_a(display))
+ g4x_dp_init(display, DP_A, PORT_A);
- if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(display, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
+ found = intel_sdvo_init(display, PCH_SDVOB, PORT_B);
if (!found)
- g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
- if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
- g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
+ g4x_hdmi_init(display, PCH_HDMIB, PORT_B);
+ if (!found && (intel_de_read(display, PCH_DP_B) & DP_DETECTED))
+ g4x_dp_init(display, PCH_DP_B, PORT_B);
}
- if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
- g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+ if (intel_de_read(display, PCH_HDMIC) & SDVO_DETECTED)
+ g4x_hdmi_init(display, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
- g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+ if (!dpd_is_edp && intel_de_read(display, PCH_HDMID) & SDVO_DETECTED)
+ g4x_hdmi_init(display, PCH_HDMID, PORT_D);
- if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
- g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
+ if (intel_de_read(display, PCH_DP_C) & DP_DETECTED)
+ g4x_dp_init(display, PCH_DP_C, PORT_C);
- if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
- g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (intel_de_read(display, PCH_DP_D) & DP_DETECTED)
+ g4x_dp_init(display, PCH_DP_D, PORT_D);
+ } else if (display->platform.valleyview || display->platform.cherryview) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
+ if (display->platform.valleyview && display->vbt.int_crt_support)
intel_crt_init(display);
/*
@@ -8202,103 +7893,102 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
- if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
- has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
- if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
- g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+ if (intel_de_read(display, VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
+ if ((intel_de_read(display, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(display, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
- if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
- has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
- if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
- g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+ if (intel_de_read(display, VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
+ if ((intel_de_read(display, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(display, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(display, PORT_D);
- if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
- g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
- if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
- g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+ if (intel_de_read(display, CHV_DP_D) & DP_DETECTED || has_port)
+ g4x_dp_init(display, CHV_DP_D, PORT_D);
+ if (intel_de_read(display, CHV_HDMID) & SDVO_DETECTED || has_port)
+ g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
- } else if (IS_PINEVIEW(dev_priv)) {
- intel_lvds_init(dev_priv);
+ vlv_dsi_init(display);
+ } else if (display->platform.pineview) {
+ intel_lvds_init(display);
intel_crt_init(display);
- } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
+ } else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
- if (IS_MOBILE(dev_priv))
- intel_lvds_init(dev_priv);
+ if (display->platform.mobile)
+ intel_lvds_init(display);
intel_crt_init(display);
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
- found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOB\n");
+ found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
+ if (!found && display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOB\n");
- g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+ g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev_priv))
- g4x_dp_init(dev_priv, DP_B, PORT_B);
+ if (!found && display->platform.g4x)
+ g4x_dp_init(display, DP_B, PORT_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
- found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOC\n");
+ found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
}
- if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(display, GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOC\n");
- g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+ g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev_priv))
- g4x_dp_init(dev_priv, DP_C, PORT_C);
+ if (display->platform.g4x)
+ g4x_dp_init(display, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
- g4x_dp_init(dev_priv, DP_D, PORT_D);
+ if (display->platform.g4x && (intel_de_read(display, DP_D) & DP_DETECTED))
+ g4x_dp_init(display, DP_D, PORT_D);
- if (SUPPORTS_TV(dev_priv))
+ if (SUPPORTS_TV(display))
intel_tv_init(display);
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (IS_I85X(dev_priv))
- intel_lvds_init(dev_priv);
+ } else if (DISPLAY_VER(display) == 2) {
+ if (display->platform.i85x)
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
encoder->base.possible_crtcs =
intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
- drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+ drm_helper_move_panel_connectors_to_head(display->drm);
}
-static int max_dotclock(struct drm_i915_private *i915)
+static int max_dotclock(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
int max_dotclock = display->cdclk.max_dotclk_freq;
if (HAS_ULTRAJOINER(display))
@@ -8312,7 +8002,7 @@ static int max_dotclock(struct drm_i915_private *i915)
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(dev);
int hdisplay_max, htotal_max;
int vdisplay_max, vtotal_max;
@@ -8349,22 +8039,22 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
* Reject clearly excessive dotclocks early to
* avoid having to worry about huge integers later.
*/
- if (mode->clock > max_dotclock(dev_priv))
+ if (mode->clock > max_dotclock(display))
return MODE_CLOCK_HIGH;
/* Transcoder timing limits */
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
hdisplay_max = 16384;
vdisplay_max = 8192;
htotal_max = 16384;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 3) {
+ } else if (DISPLAY_VER(display) >= 3) {
hdisplay_max = 4096;
vdisplay_max = 4096;
htotal_max = 8192;
@@ -8388,17 +8078,25 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ /*
+ * WM_LINETIME only goes up to (almost) 64 usec, and also
+ * knowing that the linetime is always bounded will ease the
+ * mind during various calculations.
+ */
+ if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
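
Aside (editorial, not part of the patch): a worked instance of the new WM_LINETIME bound added above. mode->clock is the dotclock in kHz and mode->htotal is in pixels, so the line time in microseconds is htotal * 1000 / clock. The mode values below are hypothetical; DIV_ROUND_UP is the usual kernel macro.

	/* Sketch with hypothetical mode values, not driver code. */
	int htotal = 4096, clock_khz = 60000;                       /* 60 MHz dotclock */
	int linetime_us = DIV_ROUND_UP(htotal * 1000, clock_khz);   /* = 69 */
	/* 69 > 64, so intel_mode_valid() would reject this mode as MODE_H_ILLEGAL;
	 * the same htotal at a 100000 kHz dotclock gives 41 us and passes. */
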
-enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
+enum drm_mode_status intel_cpu_transcoder_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode)
{
/*
* Additional transcoder timing limits,
* excluding BXT/GLK DSI transcoders.
*/
- if (DISPLAY_VER(dev_priv) >= 5) {
+ if (DISPLAY_VER(display) >= 5) {
if (mode->hdisplay < 64 ||
mode->htotal - mode->hdisplay < 32)
return MODE_H_ILLEGAL;
@@ -8417,7 +8115,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 5 || display->platform.g4x) &&
mode->hsync_start == mode->hdisplay)
return MODE_H_ILLEGAL;
@@ -8425,7 +8123,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
}
enum drm_mode_status
-intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
int num_joined_pipes)
{
@@ -8435,7 +8133,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* intel_mode_valid() should be
* sufficient on older platforms.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return MODE_OK;
/*
@@ -8443,10 +8141,10 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* plane so let's not advertize modes that are
* too big for that.
*/
- if (DISPLAY_VER(dev_priv) >= 30) {
+ if (DISPLAY_VER(display) >= 30) {
plane_width_max = 6144 * num_joined_pipes;
plane_height_max = 4800;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
plane_width_max = 5120 * num_joined_pipes;
plane_height_max = 4320;
} else {
@@ -8510,32 +8208,32 @@ static const struct intel_display_funcs i9xx_display_funcs = {
/**
* intel_init_display_hooks - initialize the display modesetting hooks
- * @dev_priv: device private
+ * @display: display device private
*/
-void intel_init_display_hooks(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.funcs.display = &skl_display_funcs;
- } else if (HAS_DDI(dev_priv)) {
- dev_priv->display.funcs.display = &ddi_display_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.funcs.display = &pch_split_display_funcs;
- } else if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.funcs.display = &vlv_display_funcs;
+void intel_init_display_hooks(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 9) {
+ display->funcs.display = &skl_display_funcs;
+ } else if (HAS_DDI(display)) {
+ display->funcs.display = &ddi_display_funcs;
+ } else if (HAS_PCH_SPLIT(display)) {
+ display->funcs.display = &pch_split_display_funcs;
+ } else if (display->platform.cherryview ||
+ display->platform.valleyview) {
+ display->funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display.funcs.display = &i9xx_display_funcs;
+ display->funcs.display = &i9xx_display_funcs;
}
}
-int intel_initial_commit(struct drm_device *dev)
+int intel_initial_commit(struct intel_display *display)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
struct intel_crtc *crtc;
int ret = 0;
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -8545,7 +8243,7 @@ int intel_initial_commit(struct drm_device *dev)
to_intel_atomic_state(state)->internal = true;
retry:
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -8554,6 +8252,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
@@ -8569,7 +8270,7 @@ retry:
*/
crtc_state->uapi.color_mgmt_changed = true;
- for_each_intel_encoder_mask(dev, encoder,
+ for_each_intel_encoder_mask(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
if (encoder->initial_fastset_check &&
!encoder->initial_fastset_check(encoder, crtc_state)) {
@@ -8707,26 +8408,7 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
-{
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- /* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->modeset_retry_work.func &&
- cancel_work_sync(&connector->modeset_retry_work))
- drm_connector_put(&connector->base);
- if (connector->hdcp.shim) {
- cancel_delayed_work_sync(&connector->hdcp.check_work);
- cancel_work_sync(&connector->hdcp.prop_work);
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
-bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
+bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 49a246feb1ae..bcc6ccb69d2b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -30,40 +30,22 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
-enum drm_scaling_filter;
-struct dpll;
struct drm_atomic_state;
-struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_file;
struct drm_format_info;
-struct drm_framebuffer;
-struct drm_i915_private;
-struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
-struct drm_plane;
-struct drm_plane_state;
-struct i915_address_space;
-struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_display;
-struct intel_dp;
struct intel_encoder;
-struct intel_initial_plane_config;
struct intel_link_m_n;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
-struct intel_remapped_info;
-struct intel_rotation_info;
-struct pci_dev;
-struct work_struct;
-
#define pipe_name(p) ((p) + 'A')
@@ -413,24 +395,27 @@ enum phy_fia {
i)
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
int bw_overhead,
struct intel_link_m_n *m_n);
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 modifier);
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier);
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier);
enum drm_mode_status
-intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
int num_joined_pipes);
enum drm_mode_status
-intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
+intel_cpu_transcoder_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode);
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+enum phy intel_port_to_phy(struct intel_display *display, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
@@ -450,37 +435,22 @@ bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
-void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int plane);
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
void intel_encoder_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
+bool intel_phy_is_combo(struct intel_display *display, enum phy phy);
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy);
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy);
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port);
enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
bool intel_encoder_is_combo(struct intel_encoder *encoder);
@@ -489,22 +459,19 @@ bool intel_encoder_is_tc(struct intel_encoder *encoder);
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct intel_display *display,
- struct intel_digital_port *dig_port,
- unsigned int expected_mask);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_zero_m_n(struct intel_link_m_n *m_n);
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder);
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
@@ -525,13 +492,9 @@ enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
-bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
-
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
@@ -542,17 +505,18 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
bool visible);
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
-void intel_update_watermarks(struct drm_i915_private *i915);
-
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_display_min_pipe_bpp(void);
+int intel_display_max_pipe_bpp(struct intel_display *display);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason);
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx);
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
@@ -561,25 +525,22 @@ void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains);
/* interface for intel_display_driver.c */
-void intel_setup_outputs(struct drm_i915_private *i915);
-int intel_initial_commit(struct drm_device *dev);
-void intel_panel_sanitize_ssc(struct drm_i915_private *i915);
-void intel_update_czclk(struct drm_i915_private *i915);
-void intel_atomic_helper_free_state_worker(struct work_struct *work);
+void intel_init_display_hooks(struct intel_display *display);
+void intel_setup_outputs(struct intel_display *display);
+int intel_initial_commit(struct intel_display *display);
+void intel_panel_sanitize_ssc(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
-
/* modesetting asserts */
-void assert_transcoder(struct drm_i915_private *dev_priv,
+void assert_transcoder(struct intel_display *display,
enum transcoder cpu_transcoder, bool state);
#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
-bool assert_port_valid(struct drm_i915_private *i915, enum port port);
+bool assert_port_valid(struct intel_display *display, enum port port);
/*
* Use INTEL_DISPLAY_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw
@@ -596,7 +557,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
unlikely(__ret_warn_on); \
})
-bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
+bool intel_scanout_needs_vtd_wa(struct intel_display *display);
int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
new file mode 100644
index 000000000000..9a47aa38cf82
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/intel/display_member.h>
+
+#include "intel_display_conversion.h"
+
+struct intel_display *__drm_to_display(struct drm_device *drm)
+{
+ /*
+ * Note: This relies on both struct drm_i915_private and struct
+ * xe_device having the struct drm_device and struct intel_display *
+ * members at the same relative offsets, as defined by struct
+ * __intel_generic_device.
+ *
+ * See also INTEL_DISPLAY_MEMBER_STATIC_ASSERT().
+ */
+ struct __intel_generic_device *d = container_of(drm, struct __intel_generic_device, drm);
+
+ return d->display;
+}
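
Aside (editorial, not part of the patch): a minimal sketch of the layout contract __drm_to_display() relies on. The struct example_driver_private and example_to_display() names below are hypothetical, used only to illustrate why the container_of() above is safe when the embedded drm_device and the intel_display pointer sit at the same relative offsets as in struct __intel_generic_device; the real check is expected to be done by INTEL_DISPLAY_MEMBER_STATIC_ASSERT() for the actual i915/xe device structs.

	#include <linux/container_of.h>
	#include <drm/drm_device.h>

	/* Hypothetical driver-private struct mirroring struct __intel_generic_device. */
	struct example_driver_private {
		struct drm_device drm;           /* embedded DRM device */
		struct intel_display *display;   /* display state pointer */
		/* driver-specific members may follow */
	};

	static inline struct intel_display *example_to_display(struct drm_device *drm)
	{
		/* Same pattern as __drm_to_display(): recover the containing struct
		 * from the embedded drm_device and return its display pointer. */
		return container_of(drm, struct example_driver_private, drm)->display;
	}
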
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index ad8545c8055d..d497bc58a73f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -8,15 +8,9 @@
#ifndef __INTEL_DISPLAY_CONVERSION__
#define __INTEL_DISPLAY_CONVERSION__
-/*
- * Transitional macro to optionally convert struct drm_i915_private * to struct
- * intel_display *, also accepting the latter.
- */
-#define __to_intel_display(p) \
- _Generic(p, \
- const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
- struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
- const struct intel_display *: (p), \
- struct intel_display *: (p))
+struct drm_device;
+struct intel_display;
+
+struct intel_display *__drm_to_display(struct drm_device *drm);
#endif /* __INTEL_DISPLAY_CONVERSION__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 554870d2494b..9b8414b77c15 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -21,17 +21,15 @@
#include "intel_display_limits.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
+#include "intel_dmc_wl.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_dmc_wl.h"
+#include "intel_pch.h"
#include "intel_wm_types.h"
-struct task_struct;
-
-struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
struct i915_audio_component;
@@ -43,8 +41,9 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display_parent_interface;
struct intel_dmc;
-struct intel_dpll_funcs;
+struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
struct intel_fdi_funcs;
@@ -52,6 +51,7 @@ struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
+struct task_struct;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -80,7 +80,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,7 +90,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -122,11 +123,11 @@ struct intel_audio {
* intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
* dpll, because on some platforms plls share registers.
*/
-struct intel_dpll {
+struct intel_dpll_global {
struct mutex lock;
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ int num_dpll;
+ struct intel_dpll dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *mgr;
struct {
@@ -141,14 +142,13 @@ struct intel_dpll {
};
struct intel_frontbuffer_tracking {
+ /* protects busy_bits */
spinlock_t lock;
/*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity.
*/
unsigned busy_bits;
- unsigned flip_bits;
};
struct intel_hotplug {
@@ -159,6 +159,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -169,8 +170,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
@@ -178,7 +179,7 @@ struct intel_hotplug {
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
- * enabled. Protected by drm_i915_private::irq_lock.
+ * enabled. Protected by intel_display::irq::lock.
*/
bool detection_work_enabled;
@@ -287,6 +288,12 @@ struct intel_display {
/* Platform (and subplatform, if any) identification */
struct intel_display_platforms platform;
+ /* Intel PCH: where the south display engine lives */
+ enum intel_pch pch_type;
+
+ /* Parent, or core, driver functions exposed to display */
+ const struct intel_display_parent_interface *parent;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -296,7 +303,7 @@ struct intel_display {
const struct intel_cdclk_funcs *cdclk;
/* Display pll funcs */
- const struct intel_dpll_funcs *dpll;
+ const struct intel_dpll_global_funcs *dpll;
/* irq display functions */
const struct intel_hotplug_funcs *hotplug;
@@ -366,6 +373,10 @@ struct intel_display {
} dbuf;
struct {
+ struct intel_global_obj obj;
+ } dbuf_bw;
+
+ struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -386,7 +397,6 @@ struct intel_display {
struct {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
- struct work_struct suspend_work;
} fbdev;
struct {
@@ -425,7 +435,7 @@ struct intel_display {
* reused when sending message to gsc cs.
* this is only populated post Meteorlake
*/
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct intel_hdcp_gsc_context *gsc_context;
/* Mutex to protect the above hdcp related values. */
struct mutex hdcp_mutex;
} hdcp;
@@ -453,6 +463,9 @@ struct intel_display {
} ips;
struct {
+ /* protects the irq masks */
+ spinlock_t lock;
+
/*
* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -465,15 +478,35 @@ struct intel_display {
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
- int vblank_wa_num_pipes;
+ int vblank_enable_count;
- struct work_struct vblank_dc_work;
+ struct work_struct vblank_notify_work;
- u32 de_irq_mask[I915_MAX_PIPES];
+ /*
+ * Cached value of VLV/CHV IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 vlv_imr_mask;
+ /*
+ * Cached value of gen 5-7 DE IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 ilk_de_imr_mask;
+ /*
+ * Cached value of BDW+ DE pipe IMR to avoid reads in updating
+ * the bitfield.
+ */
+ u32 de_pipe_imr_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
struct {
+ /* protected by wm.wm_mutex */
+ u16 linetime[I915_MAX_PIPES];
+ bool disable[I915_MAX_PIPES];
+ } pkgc;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -512,6 +545,8 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ /* modeset stuck tracking for reset */
+ atomic_t pending_fb_pin;
u32 saveDSPARB;
u32 saveSWF0[16];
u32 saveSWF1[16];
@@ -531,6 +566,11 @@ struct intel_display {
} sagv;
struct {
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex lock;
+ } sbi;
+
+ struct {
/*
* DG2: Mask of PHYs that were not calibrated by the firmware
* and should not be used.
@@ -549,6 +589,11 @@ struct intel_display {
} state;
struct {
+ unsigned int hpll_freq;
+ unsigned int czclk_freq;
+ } vlv_clock;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
@@ -557,12 +602,15 @@ struct intel_display {
/* hipri wq for commit cleanups */
struct workqueue_struct *cleanup;
+
+ /* unordered workqueue for all display unordered work */
+ struct workqueue_struct *unordered;
} wq;
/* Grouping using named structs. Keep sorted. */
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
- struct intel_dpll dpll;
+ struct intel_dpll_global dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
@@ -572,6 +620,8 @@ struct intel_display {
struct intel_vbt_data vbt;
struct intel_dmc_wl wl;
struct intel_wm wm;
+
+ struct work_struct psr_dc5_dc6_wa_work;
};
#endif /* __INTEL_DISPLAY_CORE_H__ */
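
Aside (editorial, not part of the patch): the new vlv_imr_mask/ilk_de_imr_mask/de_pipe_imr_mask fields above cache the interrupt mask value so that mask updates need no register read. A minimal sketch of that pattern, assuming the gen 5-7 DEIMR register and the new irq.lock spinlock; the helper name is hypothetical and this is not the driver's actual code.

	/* Illustrative only: update the cached DE IMR and write it back in one go. */
	static void example_ilk_update_de_irq(struct intel_display *display,
					      u32 enable_bits, u32 disable_bits)
	{
		lockdep_assert_held(&display->irq.lock);

		/* Enabling an interrupt means clearing its mask bit, and vice versa. */
		display->irq.ilk_de_imr_mask &= ~enable_bits;
		display->irq.ilk_de_imr_mask |= disable_bits;

		intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
		intel_de_posting_read(display, DEIMR);
	}
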
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 3eb7565cd83c..9bbfdae8d024 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -4,15 +4,19 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "hsw_ips.h"
-#include "i915_irq.h"
#include "i915_reg.h"
+#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
#include "intel_bo.h"
#include "intel_crtc.h"
@@ -22,6 +26,8 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -35,28 +41,27 @@
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vdsc.h"
#include "intel_wm.h"
+#include "intel_tc.h"
static struct intel_display *node_to_intel_display(struct drm_info_node *node)
{
return to_intel_display(node->minor->dev);
}
-static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
-{
- return to_i915(node->minor->dev);
-}
-
static int intel_display_caps(struct seq_file *m, void *data)
{
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(display));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -66,44 +71,41 @@ static int intel_display_caps(struct seq_file *m, void *data)
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
- spin_lock(&dev_priv->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->display.fb_tracking.busy_bits);
+ display->fb_tracking.busy_bits);
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->display.fb_tracking.flip_bits);
-
- spin_unlock(&dev_priv->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
intel_wakeref_t wakeref;
bool sr_enabled = false;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev_priv))
- sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev_priv))
- sr_enabled = intel_de_read(dev_priv, DSPFW3(dev_priv)) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ else if (HAS_PCH_SPLIT(display))
+ sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
+ else if (display->platform.i965gm || display->platform.g4x ||
+ display->platform.i945g || display->platform.i945gm)
+ sr_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (display->platform.i915gm)
+ sr_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
+ else if (display->platform.pineview)
+ sr_enabled = intel_de_read(display, DSPFW3(display)) & PINEVIEW_SELF_REFRESH_EN;
+ else if (display->platform.valleyview || display->platform.cherryview)
+ sr_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled));
@@ -112,12 +114,11 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
+ fbdev_fb = intel_fbdev_framebuffer(display->fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -129,10 +130,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base));
seq_putc(m, '\n');
}
-#endif
- mutex_lock(&dev_priv->drm.mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &dev_priv->drm) {
+ mutex_lock(&display->drm->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb == fbdev_fb)
continue;
@@ -147,16 +147,16 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
intel_bo_describe(m, intel_fb_bo(&fb->base));
seq_putc(m, '\n');
}
- mutex_unlock(&dev_priv->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
- intel_display_power_debug(i915, m);
+ intel_display_power_debug(display, m);
return 0;
}
@@ -176,14 +176,14 @@ static void intel_encoder_info(struct seq_file *m,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
encoder->base.base.id, encoder->base.name);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_state *conn_state =
connector->state;
@@ -211,38 +211,6 @@ static void intel_panel_info(struct seq_file *m,
intel_seq_print_mode(m, 2, fixed_mode);
}
-static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector,
- bool remote_req)
-{
- bool hdcp_cap = false, hdcp2_cap = false;
-
- if (!intel_connector->hdcp.shim) {
- seq_puts(m, "No Connector Support");
- goto out;
- }
-
- if (remote_req) {
- intel_hdcp_get_remote_capability(intel_connector,
- &hdcp_cap,
- &hdcp2_cap);
- } else {
- hdcp_cap = intel_hdcp_get_capability(intel_connector);
- hdcp2_cap = intel_hdcp2_get_capability(intel_connector);
- }
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
-
-out:
- seq_puts(m, "\n");
-}
-
static void intel_dp_info(struct seq_file *m, struct intel_connector *connector)
{
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
@@ -277,6 +245,8 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *mode;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_digital_port *dig_port = NULL;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
@@ -295,24 +265,24 @@ static void intel_connector_info(struct seq_file *m,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_connector->mst_port)
+ if (intel_connector->mst.dp)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
+ dig_port = dp_to_dig_port(intel_attached_dp(intel_connector));
break;
case DRM_MODE_CONNECTOR_HDMIA:
intel_hdmi_info(m, intel_connector);
+ dig_port = hdmi_to_dig_port(intel_attached_hdmi(intel_connector));
break;
default:
break;
}
- seq_puts(m, "\tHDCP version: ");
- if (intel_connector->mst_port) {
- intel_hdcp_info(m, intel_connector, true);
- seq_puts(m, "\tMST Hub HDCP version: ");
- }
- intel_hdcp_info(m, intel_connector, false);
+ if (dig_port && intel_encoder_is_tc(&dig_port->base))
+ intel_tc_info(&p, dig_port);
+
+ intel_hdcp_info(m, intel_connector);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
@@ -363,8 +333,8 @@ static const char *plane_visibility(const struct intel_plane_state *plane_state)
if (plane_state->uapi.visible)
return "visible";
- if (plane_state->planar_slave)
- return "planar-slave";
+ if (plane_state->is_y_plane)
+ return "Y plane";
return "hidden";
}
@@ -397,7 +367,7 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
if (plane_state->planar_linked_plane)
seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
- plane_state->planar_slave ? "slave" : "master");
+ plane_state->is_y_plane ? "Y plane" : "UV plane");
}
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
@@ -425,10 +395,10 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
plane->base.base.id, plane->base.name,
plane_type(plane->base.type));
@@ -571,7 +541,7 @@ static void crtc_updates_add(struct intel_crtc *crtc)
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -595,6 +565,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
DRM_RECT_ARG(&crtc_state->pipe_src),
str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tport_clock=%d, lane_count=%d\n",
+ crtc_state->port_clock, crtc_state->lane_count);
intel_scaler_info(m, crtc);
@@ -605,7 +577,7 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
intel_vdsc_state_dump(&p, 1, crtc_state);
- for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask(display->drm, encoder,
crtc_state->uapi.encoder_mask)
intel_encoder_info(m, crtc, encoder);
@@ -620,88 +592,77 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(i915),
- DISPLAY_RUNTIME_INFO(i915), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
- dev_priv->display.dpll.ref_clks.nssc,
- dev_priv->display.dpll.ref_clks.ssc);
+ display->dpll.ref_clks.nssc,
+ display->dpll.ref_clks.ssc);
- for_each_shared_dpll(dev_priv, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask,
str_yes_no(pll->on));
drm_printf(&p, " tracked hardware state:\n");
- intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state);
+ intel_dpll_dump_hw_state(display, &p, &pll->state.hw_state);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct skl_ddb_entry *entry;
struct intel_crtc *crtc;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return -ENODEV;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -721,38 +682,35 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
entry->end, skl_ddb_entry_size(entry));
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
static bool
-intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct intel_display *display = &i915->display;
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
bool lpsp_enabled = false;
- if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
- } else if (IS_DISPLAY_VER(i915, 11, 12)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
+ if (DISPLAY_VER(display) >= 13 || IS_DISPLAY_VER(display, 9, 10)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, SKL_DISP_PW_2);
+ } else if (IS_DISPLAY_VER(display, 11, 12)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, ICL_DISP_PW_3);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, HSW_DISP_PW_GLOBAL);
} else {
seq_puts(m, "LPSP: not supported\n");
return 0;
@@ -765,13 +723,13 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_encoder *intel_encoder;
struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -787,7 +745,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
dig_port->base.base.base.id,
dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst.mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -799,7 +757,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_display *display = filp->private_data;
struct intel_crtc *crtc;
int ret;
bool reset;
@@ -811,7 +769,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!reset)
return cnt;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct drm_crtc_commit *commit;
struct intel_crtc_state *crtc_state;
@@ -828,7 +786,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
}
if (!ret && crtc_state->hw.active) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Re-arming FIFO underruns on pipe %c\n",
pipe_name(crtc->pipe));
@@ -841,7 +799,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
- intel_fbc_reset_underrun(&dev_priv->display);
+ intel_fbc_reset_underrun(display);
return cnt;
}
@@ -860,68 +818,39 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_lpsp_status", i915_lpsp_status, 0},
};
-void intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
- to_i915(minor->dev), &i915_fifo_underrun_reset_ops);
+ debugfs_create_file("i915_fifo_underrun_reset", 0644, debugfs_root,
+ display, &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, display->drm->primary);
intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(display);
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
- if (ret)
- return ret;
-
- if (!connector->base.encoder ||
- connector->base.status != connector_status_connected) {
- ret = -ENODEV;
- goto out;
- }
-
- seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
- connector->base.base.id);
- intel_hdcp_info(m, connector, false);
-
-out:
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
-
- return ret;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
@@ -932,24 +861,24 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
if (connector->base.status != connector_status_connected)
return -ENODEV;
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
lpsp_capable = encoder->port <= PORT_B;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
/*
* Actually TGL can drive LPSP on port till DDI_C
* but there is no physical connected DDI_C on TGL sku's,
- * even driver is not initilizing DDI_C port for gen12.
+ * even driver is not initializing DDI_C port for gen12.
*/
lpsp_capable = encoder->port <= PORT_B;
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP);
- else if (IS_DISPLAY_VER(i915, 9, 10))
+ else if (IS_DISPLAY_VER(display, 9, 10))
lpsp_capable = (encoder->port == PORT_A &&
(connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort));
- else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ else if (display->platform.haswell || display->platform.broadwell)
lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -961,7 +890,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
struct drm_modeset_acquire_ctx ctx;
@@ -973,7 +902,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
do {
try_again = false;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
&ctx);
if (ret) {
if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
@@ -1013,6 +942,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr444)));
seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
+ seq_printf(m, "DSC_Sink_Max_Slice_Count: %d\n",
+ drm_dp_dsc_sink_max_slice_count((connector->dp.dsc_dpcd), intel_dp_is_edp(intel_dp)));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1032,7 +963,7 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_enable = false;
@@ -1041,15 +972,15 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
if (len == 0)
return 0;
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Copied %zu bytes from user to force DSC\n", len);
ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
if (ret < 0)
return ret;
- drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
+ drm_dbg(display->drm, "Got %s for DSC Enable\n",
+ str_true_false(dsc_enable));
intel_dp->force_dsc_en = dsc_enable;
*offp += len;
@@ -1075,7 +1006,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -1084,7 +1015,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1097,7 +1028,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
-out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+out: drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1141,7 +1072,7 @@ static const struct file_operations i915_dsc_bpc_fops = {
static int i915_dsc_output_format_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -1150,7 +1081,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1164,7 +1095,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
seq_printf(m, "DSC_Output_Format: %s\n",
intel_output_format_name(crtc_state->output_format));
-out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+out: drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1208,7 +1139,7 @@ static const struct file_operations i915_dsc_output_format_fops = {
static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
@@ -1217,7 +1148,7 @@ static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1232,7 +1163,7 @@ static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
out:
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1243,8 +1174,8 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_fractional_bpp_enable = false;
int ret;
@@ -1252,15 +1183,15 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
if (len == 0)
return 0;
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Copied %zu bytes from user to force fractional bpp for DSC\n", len);
ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
if (ret < 0)
return ret;
- drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
- (dsc_fractional_bpp_enable) ? "true" : "false");
+ drm_dbg(display->drm, "Got %s for DSC Fractional BPP Enable\n",
+ str_true_false(dsc_fractional_bpp_enable));
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
*offp += len;
@@ -1388,7 +1319,7 @@ static const struct file_operations i915_joiner_fops = {
*/
void intel_connector_debugfs_add(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct dentry *root = connector->base.debugfs_entry;
int connector_type = connector->base.connector_type;
@@ -1397,20 +1328,15 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
return;
intel_drrs_connector_debugfs_add(connector);
+ intel_hdcp_connector_debugfs_add(connector);
intel_pps_connector_debugfs_add(connector);
intel_psr_connector_debugfs_add(connector);
intel_alpm_lobf_debugfs_add(connector);
intel_dp_link_training_debugfs_add(connector);
+ intel_link_bw_connector_debugfs_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
- connector, &i915_hdcp_sink_capability_fops);
- }
-
- if (DISPLAY_VER(i915) >= 11 &&
- ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ if (DISPLAY_VER(display) >= 11 &&
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
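
The i915_dsc_* nodes touched above all share one debugfs shape: a seq_file show handler opened through single_open(), paired with a write handler that parses a boolean from userspace with kstrtobool_from_user() and stores it as a force flag. Below is a minimal, self-contained sketch of that pattern; the example_* names are hypothetical and only generic kernel APIs are assumed — this is not code from the patch itself.

	#include <linux/debugfs.h>
	#include <linux/kstrtox.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>
	#include <linux/uaccess.h>

	static bool example_force_enable;

	/* Show the current value of the force flag. */
	static int example_show(struct seq_file *m, void *data)
	{
		seq_printf(m, "Force enable: %s\n",
			   example_force_enable ? "yes" : "no");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	/* Accept "0"/"1"/"y"/"n" style input and update the flag. */
	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
	{
		int ret;

		if (len == 0)
			return 0;

		ret = kstrtobool_from_user(ubuf, len, &example_force_enable);
		if (ret < 0)
			return ret;

		*offp += len;
		return len;
	}

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
		.open = example_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
		.write = example_write,
	};

	/* Register the node; the fops are wired exactly like the ones above. */
	static void example_debugfs_add(struct dentry *root)
	{
		debugfs_create_file("example_force_enable", 0644, root,
				    NULL, &example_fops);
	}
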
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index e1f479b7acd1..82af2f608111 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,16 +6,16 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc;
+struct intel_display;
#ifdef CONFIG_DEBUG_FS
-void intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct intel_display *display);
void intel_connector_debugfs_add(struct intel_connector *connector);
void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
-static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
+static inline void intel_display_debugfs_register(struct intel_display *display) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index ec3ed29a83c9..de62b774272d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -8,8 +8,8 @@
#include <drm/drm_drv.h>
+#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
-#include "i915_drv.h"
#include "intel_display_params.h"
/* int param */
@@ -153,14 +153,14 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
/* add a subdirectory with files for each intel display param */
void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
struct dentry *dir;
char dirname[16];
snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
- dir = debugfs_lookup(dirname, minor->debugfs_root);
+ dir = debugfs_lookup(dirname, debugfs_root);
if (!dir)
- dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ dir = debugfs_create_dir(dirname, debugfs_root);
if (IS_ERR(dir))
return;
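
For context on the hunk above: with the drm_minor out of the picture, the params directory hangs directly off the DRM device's debugfs root. A hedged sketch of that registration shape follows, assuming only debugfs_create_dir()/debugfs_create_bool() and the drm_device debugfs_root pointer used above; the example_* names are hypothetical.

	#include <linux/debugfs.h>
	#include <linux/err.h>

	#include <drm/drm_device.h>

	static bool example_param;

	static void example_params_register(struct drm_device *drm)
	{
		struct dentry *dir;

		/* One subdirectory per driver, directly under the device root. */
		dir = debugfs_create_dir("example_params", drm->debugfs_root);
		if (IS_ERR(dir))
			return;

		debugfs_create_bool("example_param", 0600, dir, &example_param);
	}
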
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 68cb7f9b9ef3..1170afaa8680 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,11 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/pciids.h>
-#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
-#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/intel/pciids.h>
+
#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
@@ -16,6 +18,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1351,12 +1354,31 @@ static const struct intel_display_device_info xe2_lpd_display = {
.__runtime_defaults.has_dbuf_overlap_detection = true,
};
+static const struct intel_display_device_info wcl_display = {
+ XE_LPDP_FEATURES,
+
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.pipe_mask =
+ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.fbc_mask =
+ BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C),
+ .__runtime_defaults.port_mask =
+ BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2),
+};
+
static const struct intel_display_device_info xe2_hpd_display = {
XE_LPDP_FEATURES,
.__runtime_defaults.port_mask = BIT(PORT_A) |
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
+static const u16 mtl_u_ids[] = {
+ INTEL_MTL_U_IDS(ID),
+ INTEL_ARL_U_IDS(ID),
+ 0
+};
+
/*
* Do not initialize the .info member of the platform desc for GMD ID based
* platforms. Their display will be probed automatically based on the IP version
@@ -1364,6 +1386,13 @@ static const struct intel_display_device_info xe2_hpd_display = {
*/
static const struct platform_desc mtl_desc = {
PLATFORM(meteorlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(meteorlake, u),
+ .pciidlist = mtl_u_ids,
+ },
+ {},
+ }
};
static const struct platform_desc lnl_desc = {
@@ -1375,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};
+static const u16 wcl_ids[] = {
+ INTEL_WCL_IDS(ID),
+ 0
+};
+
static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(pantherlake, wildcatlake),
+ .pciidlist = wcl_ids,
+ },
+ {},
+ }
};
__diag_pop();
@@ -1453,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
@@ -1464,6 +1506,8 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
+ { 30, 2, &wcl_display },
+ { 35, 0, &xe2_lpd_display },
};
static const struct intel_display_device_info *
@@ -1604,18 +1648,25 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent)
{
- struct intel_display *display = to_intel_display(pdev);
+ struct intel_display *display;
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
enum intel_step step;
+ display = kzalloc(sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return ERR_PTR(-ENOMEM);
+
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
+ display->parent = parent;
+
intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
@@ -1693,12 +1744,15 @@ no_display:
void intel_display_device_remove(struct intel_display *display)
{
+ if (!display)
+ return;
+
intel_display_params_free(&display->params);
+ kfree(display);
}
static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
@@ -1762,7 +1816,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
goto display_fused_off;
}
- if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(display)) {
u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
@@ -1777,7 +1831,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(i915) &&
+ (HAS_PCH_CPT(display) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(display->drm,
"Display fused off, disabling\n");
@@ -1907,6 +1961,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
+bool intel_display_device_present(struct intel_display *display)
+{
+ return display && HAS_DISPLAY(display);
+}
+
/*
* Assuming the device has display hardware, should it be enabled?
*
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 9a333d9e6601..b559ef43d547 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -9,11 +9,11 @@
#include <linux/bitops.h>
#include <linux/types.h>
-#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_printer;
struct intel_display;
+struct intel_display_parent_interface;
struct pci_dev;
/*
@@ -96,12 +96,15 @@ struct pci_dev;
func(dg2_g12) \
/* Display ver 14 (based on GMD ID) */ \
func(meteorlake) \
+ func(meteorlake_u) \
/* Display ver 20 (based on GMD ID) */ \
func(lunarlake) \
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
- func(pantherlake)
+ func(pantherlake) \
+ func(pantherlake_wildcatlake)
+
#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
@@ -140,11 +143,17 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
+#define HAS_128B_Y_TILING(__display) (!(__display)->platform.i915g && !(__display)->platform.i915gm)
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
+#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
@@ -152,20 +161,25 @@ struct intel_display_platforms {
#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
-#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_FDI(__display) (IS_DISPLAY_VER((__display), 5, 8) && !HAS_GMCH(__display))
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -174,6 +188,7 @@ struct intel_display_platforms {
#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PIPEDMC(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
@@ -182,14 +197,10 @@ struct intel_display_platforms {
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
- ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
- HAS_DSC(__display))
+#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \
+ DISPLAY_VER(__display) == 14) && HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
@@ -218,8 +229,8 @@ struct intel_display_platforms {
(IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
+#define DISPLAY_INFO(__display) ((__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&(__display)->info.__runtime_info)
#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
@@ -230,9 +241,20 @@ struct intel_display_platforms {
#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
#define IS_DISPLAY_STEP(__display, since, until) \
- (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ (drm_WARN_ON((__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
+#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
+#define ARLS_HOST_BRIDGE_PCI_ID2 0x7D2D
+#define ARLS_HOST_BRIDGE_PCI_ID3 0x7D2E
+#define ARLS_HOST_BRIDGE_PCI_ID4 0x7D2F
+
+#define IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(id) \
+ (((id) == ARLS_HOST_BRIDGE_PCI_ID1) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID2) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID3) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID4))
+
struct intel_display_runtime_info {
struct intel_display_ip_ver {
u16 ver;
@@ -290,8 +312,10 @@ struct intel_display_device_info {
} color;
};
+bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent);
void intel_display_device_remove(struct intel_display *display);
void intel_display_device_info_runtime_init(struct intel_display *display);
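
The new IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID() helper above compares a PCI device ID against the four ARL-S host bridge IDs. How the caller obtains that ID is not shown in this hunk; the sketch below is one plausible way to feed it — reading the device ID of the host bridge at 0000:00:00.0 — and is an assumption for illustration only, with a hypothetical example_* name.

	#include <linux/pci.h>

	static bool example_is_arrowlake_s(void)
	{
		struct pci_dev *bridge;
		u16 id;

		/* The host bridge normally sits at domain 0, bus 0, devfn 00.0. */
		bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
		if (!bridge)
			return false;

		id = bridge->device;
		pci_dev_put(bridge);

		return IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(id);
	}
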
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index a7f4b0ce0740..7e000ba3e08b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -14,10 +14,12 @@
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -27,11 +29,15 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_dbuf_bw.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
@@ -43,6 +49,7 @@
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
+#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -54,6 +61,7 @@
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
@@ -82,19 +90,12 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_cdclk_state *cdclk_state;
-
if (!HAS_DISPLAY(display))
return;
- cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
+ intel_cdclk_read_hw(display);
- intel_update_cdclk(display);
- intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
-
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -148,24 +149,12 @@ static void intel_mode_config_init(struct intel_display *display)
mode_config->max_height = 2048;
}
- if (display->platform.i845g || display->platform.i865g) {
- mode_config->cursor_width = display->platform.i845g ? 64 : 512;
- mode_config->cursor_height = 1023;
- } else if (display->platform.i830 || display->platform.i85x ||
- display->platform.i915g || display->platform.i915gm) {
- mode_config->cursor_width = 64;
- mode_config->cursor_height = 64;
- } else {
- mode_config->cursor_width = 256;
- mode_config->cursor_height = 256;
- }
+ intel_cursor_mode_config_init(display);
}
static void intel_mode_config_cleanup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_atomic_global_obj_cleanup(i915);
+ intel_atomic_global_obj_cleanup(display);
drm_mode_config_cleanup(display->drm);
}
@@ -183,7 +172,8 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_pch_detect(display);
if (!HAS_DISPLAY(display))
return;
@@ -195,14 +185,14 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
- intel_dkl_phy_init(i915);
+ intel_display_irq_init(display);
+ intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
- intel_audio_hooks_init(i915);
- intel_dpll_init_clock_hook(i915);
- intel_init_display_hooks(i915);
- intel_fdi_init_hook(i915);
+ intel_audio_hooks_init(display);
+ intel_dpll_init_clock_hook(display);
+ intel_init_display_hooks(display);
+ intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
}
@@ -228,46 +218,78 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_bios;
+ intel_psr_dc5_dc6_wa_init(display);
+
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
goto cleanup_vga;
- intel_pmdemand_init_early(i915);
+ intel_pmdemand_init_early(display);
intel_power_domains_init_hw(display, false);
if (!HAS_DISPLAY(display))
return 0;
- intel_dmc_init(display);
+ display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
+ if (!display->hotplug.dp_wq) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ if (!display->wq.modeset) {
+ ret = -ENOMEM;
+ goto cleanup_wq_dp;
+ }
+
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!display->wq.flip) {
+ ret = -ENOMEM;
+ goto cleanup_wq_modeset;
+ }
+
display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ if (!display->wq.cleanup) {
+ ret = -ENOMEM;
+ goto cleanup_wq_flip;
+ }
+
+ display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ if (!display->wq.unordered) {
+ ret = -ENOMEM;
+ goto cleanup_wq_cleanup;
+ }
+
+ intel_dmc_init(display);
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
ret = intel_color_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
- ret = intel_bw_init(i915);
+ ret = intel_dbuf_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
- ret = intel_pmdemand_init(i915);
+ ret = intel_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_unordered;
+
+ ret = intel_pmdemand_init(display);
+ if (ret)
+ goto cleanup_wq_unordered;
intel_init_quirks(display);
@@ -275,6 +297,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_unordered:
+ destroy_workqueue(display->wq.unordered);
+cleanup_wq_cleanup:
+ destroy_workqueue(display->wq.cleanup);
+cleanup_wq_flip:
+ destroy_workqueue(display->wq.flip);
+cleanup_wq_modeset:
+ destroy_workqueue(display->wq.modeset);
+cleanup_wq_dp:
+ destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
@@ -317,11 +349,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -343,9 +373,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -399,7 +427,6 @@ void intel_display_driver_resume_access(struct intel_display *display)
*/
bool intel_display_driver_check_access(struct intel_display *display)
{
- char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
@@ -408,12 +435,11 @@ bool intel_display_driver_check_access(struct intel_display *display)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
- get_task_comm(comm, current),
- task_pid_vnr(current));
+ current->comm, task_pid_vnr(current));
if (display->access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
- get_task_comm(comm, display->access.allowed_task),
+ display->access.allowed_task->comm,
task_pid_vnr(display->access.allowed_task));
drm_dbg_kms(display->drm,
@@ -426,16 +452,15 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
- intel_panel_sanitize_ssc(i915);
+ intel_panel_sanitize_ssc(display);
intel_pps_setup(display);
@@ -446,27 +471,24 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
INTEL_NUM_PIPES(display) > 1 ? "s" : "");
for_each_pipe(display, pipe) {
- ret = intel_crtc_init(i915, pipe);
+ ret = intel_crtc_init(display, pipe);
if (ret)
goto err_mode_config;
}
intel_plane_possible_crtcs_init(display);
- intel_shared_dpll_init(i915);
- intel_fdi_pll_freq_update(i915);
+ intel_dpll_init(display);
+ intel_fdi_pll_freq_update(display);
- intel_update_czclk(i915);
intel_display_driver_init_hw(display);
- intel_dpll_update_ref_clks(i915);
+ intel_dpll_update_ref_clks(display);
if (display->cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(display);
intel_hti_init(display);
- /* Just disable it once at startup */
- intel_vga_disable(display);
- intel_setup_outputs(i915);
+ intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
if (ret)
@@ -475,7 +497,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_display_driver_disable_user_access(display);
drm_modeset_lock_all(display->drm);
- intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(display->drm);
@@ -487,7 +509,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -502,7 +524,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -515,29 +536,30 @@ int intel_display_driver_probe(struct intel_display *display)
*/
intel_hdcp_component_init(display);
+ intel_flipq_init(display);
+
/*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
* are already calculated and there is no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(display->drm);
+ ret = intel_initial_commit(display);
if (ret)
drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
void intel_display_driver_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
@@ -548,13 +570,13 @@ void intel_display_driver_register(struct intel_display *display)
intel_opregion_register(display);
intel_acpi_video_register(display);
- intel_audio_init(i915);
+ intel_audio_init(display);
intel_display_driver_enable_user_access(display);
- intel_audio_register(i915);
+ intel_audio_register(display);
- intel_display_debugfs_register(i915);
+ intel_display_debugfs_register(display);
/*
* We need to coordinate the hotplugs with the asynchronous
@@ -562,39 +584,38 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
- intel_fbdev_setup(i915);
+ intel_fbdev_setup(display);
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
+
+ intel_register_dsm_handler();
}
/* part #1: call before irq uninstall */
void intel_display_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
flush_workqueue(display->wq.flip);
flush_workqueue(display->wq.modeset);
flush_workqueue(display->wq.cleanup);
+ flush_workqueue(display->wq.unordered);
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
* drm_mode_config_cleanup()
*/
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
}
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -604,12 +625,12 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_workqueue(i915->unordered_wq);
+ flush_workqueue(display->wq.unordered);
intel_hdcp_component_fini(display);
@@ -621,9 +642,11 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_gmbus_teardown(display);
+ destroy_workqueue(display->hotplug.dp_wq);
destroy_workqueue(display->wq.flip);
destroy_workqueue(display->wq.modeset);
destroy_workqueue(display->wq.cleanup);
+ destroy_workqueue(display->wq.unordered);
intel_fbc_cleanup(display);
}
@@ -642,11 +665,11 @@ void intel_display_driver_remove_nogem(struct intel_display *display)
void intel_display_driver_unregister(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
+ intel_unregister_dsm_handler();
+
drm_client_dev_unregister(display->drm);
/*
@@ -658,7 +681,7 @@ void intel_display_driver_unregister(struct intel_display *display)
intel_display_driver_disable_user_access(display);
- intel_audio_deinit(i915);
+ intel_audio_deinit(display);
drm_atomic_helper_shutdown(display->drm);
@@ -672,7 +695,6 @@ void intel_display_driver_unregister(struct intel_display *display)
*/
int intel_display_driver_suspend(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state;
int ret;
@@ -690,7 +712,7 @@ int intel_display_driver_suspend(struct intel_display *display)
/* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
flush_workqueue(display->wq.cleanup);
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
return ret;
}
@@ -700,13 +722,11 @@ __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(display);
+ intel_modeset_setup_hw_state(display, ctx);
if (!state)
return 0;
@@ -738,7 +758,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -747,7 +766,7 @@ void intel_display_driver_resume(struct intel_display *display)
return;
/* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(i915);
+ intel_dp_mst_resume(display);
display->restore.modeset_state = NULL;
if (state)
@@ -766,7 +785,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
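
The probe path above grows several workqueue allocations, each unwound through a goto ladder so that a failure only tears down the queues created before it. A minimal sketch of that unwind idiom, assuming nothing beyond the generic workqueue API; the example_* names are hypothetical and this is not the driver's code.

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	struct example_queues {
		struct workqueue_struct *modeset;
		struct workqueue_struct *flip;
		struct workqueue_struct *cleanup;
	};

	static int example_queues_init(struct example_queues *q)
	{
		q->modeset = alloc_ordered_workqueue("example_modeset", 0);
		if (!q->modeset)
			return -ENOMEM;

		q->flip = alloc_workqueue("example_flip",
					  WQ_HIGHPRI | WQ_UNBOUND,
					  WQ_UNBOUND_MAX_ACTIVE);
		if (!q->flip)
			goto err_modeset;

		q->cleanup = alloc_workqueue("example_cleanup", WQ_HIGHPRI, 0);
		if (!q->cleanup)
			goto err_flip;

		return 0;

	err_flip:
		destroy_workqueue(q->flip);
	err_modeset:
		destroy_workqueue(q->modeset);
		return -ENOMEM;
	}
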
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 069043f9d894..43b27deb4a26 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -3,9 +3,9 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -13,8 +13,13 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
@@ -22,14 +27,100 @@
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
+#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_uncore.h"
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_reset(to_intel_uncore(display->drm), regs);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
+{
+ intel_dmc_wl_get(display, reg);
+
+ gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);
+
+ intel_dmc_wl_put(display, reg);
+}
+
+struct pipe_fault_handler {
+ bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id);
+ u32 fault;
+ enum plane_id plane_id;
+};
+
+static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_plane_error error = {};
+ struct intel_plane *plane;
+
+ plane = intel_crtc_get_plane(crtc, plane_id);
+ if (!plane || !plane->capture_error)
+ return false;
+
+ plane->capture_error(crtc, plane, &error);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ error.ctl, error.surf, error.surflive);
+
+ return true;
+}
+
+static void intel_pipe_fault_irq_handler(struct intel_display *display,
+ const struct pipe_fault_handler *handlers,
+ enum pipe pipe, u32 fault_errors)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ const struct pipe_fault_handler *handler;
+
+ for (handler = handlers; handler && handler->fault; handler++) {
+ if ((fault_errors & handler->fault) == 0)
+ continue;
+
+ if (handler->handle(crtc, handler->plane_id))
+ fault_errors &= ~handler->fault;
+ }
+
+ WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n",
+ crtc->base.base.id, crtc->base.name, fault_errors);
+}
+
+static void
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -37,174 +128,177 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- new_val = dev_priv->irq_mask;
+ new_val = display->irq.ilk_de_imr_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
- dev_priv->irq_mask = new_val;
- intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
- intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
+ if (new_val != display->irq.ilk_de_imr_mask &&
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
+ display->irq.ilk_de_imr_mask = new_val;
+ intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
+ intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
+ intel_de_posting_read(display, GEN8_DE_PORT_IMR);
}
}
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_pipe_imr_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
- dev_priv->display.irq.de_irq_mask[pipe]);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
+ if (new_val != display->irq.de_pipe_imr_mask[pipe]) {
+ display->irq.de_pipe_imr_mask[pipe] = new_val;
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]);
+ intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
- intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
+ intel_de_write(display, SDEIMR, sdeimr);
+ intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
+ u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
goto out;
/*
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ if (drm_WARN_ON_ONCE(display->drm,
status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ if (drm_WARN_ON_ONCE(display->drm,
status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
@@ -217,7 +311,7 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
out:
- drm_WARN_ONCE(&dev_priv->drm,
+ drm_WARN_ONCE(display->drm,
enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
@@ -226,97 +320,91 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
-/**
- * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
- */
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+/* enable ASLE pipestat for OpRegion */
+static void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (!intel_opregion_asle_present(display))
return;
if (!i915_has_legacy_blc_interrupt(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -333,7 +421,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -346,20 +434,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -367,80 +454,77 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_write(&dev_priv->uncore,
- PIPESTAT(dev_priv, pipe),
- PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display,
+ PIPESTAT(display, pipe),
+ PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
enum pipe pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
+ spin_unlock(&display->irq.lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
@@ -468,14 +552,14 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
- pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
+ pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
/*
* Clear the PIPE*STAT regs before the IIR
@@ -487,57 +571,55 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
+ intel_de_write(display, reg, pipe_stats[pipe]);
+ intel_de_write(display, reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -547,42 +629,40 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -593,85 +673,130 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
+ intel_pch_fifo_underrun_irq_handler(display, PIPE_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
+ intel_pch_fifo_underrun_irq_handler(display, PIPE_B);
+}
+
+static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return ERR_INT_SPRITE_A_FAULT |
+ ERR_INT_PRIMARY_A_FAULT |
+ ERR_INT_CURSOR_A_FAULT;
+ case PIPE_B:
+ return ERR_INT_SPRITE_B_FAULT |
+ ERR_INT_PRIMARY_B_FAULT |
+ ERR_INT_CURSOR_B_FAULT;
+ case PIPE_C:
+ return ERR_INT_SPRITE_C_FAULT |
+ ERR_INT_PRIMARY_C_FAULT |
+ ERR_INT_CURSOR_C_FAULT;
+ default:
+ return 0;
+ }
}
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
+ { .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static void ivb_err_int_handler(struct intel_display *display)
{
- u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
+ u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
+
+ if (err_int & ERR_INT_INVALID_GTT_PTE)
+ drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
+
+ if (err_int & ERR_INT_INVALID_PTE_DATA)
+ drm_err_ratelimited(display->drm, "Invalid PTE data\n");
+
+ for_each_pipe(display, pipe) {
+ u32 fault_errors;
- for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
+
+ fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
+ pipe, fault_errors);
}
- intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
+ intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
+ u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
- intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_pch_fifo_underrun_irq_handler(display, pipe);
- intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
+ intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -682,30 +807,79 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
+}
+
+static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return GTT_FAULT_SPRITE_A_FAULT |
+ GTT_FAULT_PRIMARY_A_FAULT |
+ GTT_FAULT_CURSOR_A_FAULT;
+ case PIPE_B:
+ return GTT_FAULT_SPRITE_B_FAULT |
+ GTT_FAULT_PRIMARY_B_FAULT |
+ GTT_FAULT_CURSOR_B_FAULT;
+ default:
+ return 0;
+ }
+}
+
+static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
+ { .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static void ilk_gtt_fault_irq_handler(struct intel_display *display)
+{
+ enum pipe pipe;
+ u32 gtt_fault;
+
+ gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
+ intel_de_write(display, ILK_GTT_FAULT, gtt_fault);
+
+ if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
+ drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
+
+ if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
+ drm_err_ratelimited(display->drm, "Invalid PTE data\n");
+
+ for_each_pipe(display, pipe) {
+ u32 fault_errors;
+
+ fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
+ pipe, fault_errors);
+ }
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -714,60 +888,61 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
- for_each_pipe(dev_priv, pipe) {
+ if (de_iir & DE_GTT_FAULT)
+ ilk_gtt_fault_irq_handler(display);
+
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ u32 pch_iir = intel_de_read(display, SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ if (HAS_PCH_CPT(display))
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
+ ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore,
- EDP_PSR_IIR, 0, 0);
+ psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
intel_psr_irq_handler(intel_dp, psr_iir);
break;
}
@@ -779,35 +954,82 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
+ }
+}
+
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier)
+{
+ /* disable master interrupt before clearing iir */
+ *de_ier = intel_de_read_fw(display, DEIER);
+ intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ /*
+ * Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be able
+ * to process them after we restore SDEIER (as soon as we restore it,
+ * we'll get an interrupt if SDEIIR still has something to process due
+ * to its back queue).
+ */
+ if (!HAS_PCH_NOP(display)) {
+ *sde_ier = intel_de_read_fw(display, SDEIER);
+ intel_de_write_fw(display, SDEIER, 0);
+ } else {
+ *sde_ier = 0;
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier)
+{
+ intel_de_write_fw(display, DEIER, de_ier);
+
+ if (sde_ier)
+ intel_de_write_fw(display, SDEIER, sde_ier);
+}
+
+bool ilk_display_irq_handler(struct intel_display *display)
+{
+ u32 de_iir;
+ bool handled = false;
+
+ de_iir = intel_de_read_fw(display, DEIIR);
+ if (de_iir) {
+ intel_de_write_fw(display, DEIIR, de_iir);
+ if (DISPLAY_VER(display) >= 7)
+ _ivb_display_irq_handler(display, de_iir);
+ else
+ _ilk_display_irq_handler(display, de_iir);
+ handled = true;
+ }
+
+ return handled;
+}
+
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -817,7 +1039,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -829,12 +1051,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -842,11 +1064,17 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (DISPLAY_VER(display) >= 14)
+ if (DISPLAY_VER(display) >= 20)
+ return MTL_PLANE_ATS_FAULT |
+ GEN9_PIPE_CURSOR_FAULT |
+ GEN11_PIPE_PLANE5_FAULT |
+ GEN9_PIPE_PLANE4_FAULT |
+ GEN9_PIPE_PLANE3_FAULT |
+ GEN9_PIPE_PLANE2_FAULT |
+ GEN9_PIPE_PLANE1_FAULT;
+ else if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -856,7 +1084,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
+ else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
@@ -895,15 +1123,116 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN8_PIPE_PRIMARY_FAULT;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PLANE ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PIPEDMC ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PIPEDMC fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
+ { .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
+ { .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
+ { .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
+ { .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
+ { .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
+ { .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
+ { .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
+ { .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
+ { .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler *
+gen8_pipe_fault_handlers(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 12)
+ return tgl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 11)
+ return icl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 9)
+ return skl_pipe_fault_handlers;
+ else
+ return bdw_pipe_fault_handlers;
+}
+
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -913,21 +1242,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
- u32 val = intel_uncore_read(&dev_priv->uncore,
- RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -940,16 +1268,16 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
+ psr_iir = intel_de_rmw(display, iir_reg, 0, 0);
if (psr_iir)
found = true;
@@ -957,16 +1285,16 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* prior GEN12 only have one EDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
enum pipe pipe = INVALID_PIPE;
@@ -978,8 +1306,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 * In case of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -991,17 +1318,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI transcoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1013,31 +1339,31 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
u32 pica_ier = 0;
*pica_iir = 0;
- *pch_iir = intel_de_read(i915, SDEIIR);
+ *pch_iir = intel_de_read(display, SDEIIR);
if (!*pch_iir)
return;
@@ -1047,148 +1373,150 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);
- pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
- *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
- intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
+ pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
+ *pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
+ intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
}
- intel_de_write(i915, SDEIIR, *pch_iir);
+ intel_de_write(display, SDEIIR, *pch_iir);
if (pica_ier)
- intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
+ intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
+ iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ intel_de_write(display, GEN8_DE_MISC_IIR, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ intel_de_write(display, GEN11_DE_HPD_IIR, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
+ iir = intel_de_read(display, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
+ intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
- "The master control interrupt lied (DE PIPE)!\n");
+ drm_err_ratelimited(display->drm,
+ "The master control interrupt lied (DE PIPE %c)!\n",
+ pipe_name(pipe));
continue;
}
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
+ if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
+ intel_pipedmc_irq_handler(display, pipe);
+
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
- drm_err_ratelimited(&dev_priv->drm,
- "Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ intel_pipe_fault_irq_handler(display,
+ gen8_pipe_fault_handlers(display),
+ pipe, fault_errors);
}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+ if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
master_ctl & GEN8_DE_PCH_IRQ) {
u32 pica_iir;
@@ -1197,71 +1525,73 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_handler(display, iir);
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
- iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
+ intel_display_rpm_assert_block(display);
+
+ iir = intel_de_read(display, GEN11_GU_MISC_IIR);
if (likely(iir))
- intel_de_write(i915, GEN11_GU_MISC_IIR, iir);
+ intel_de_write(display, GEN11_GU_MISC_IIR, iir);
+
+ intel_display_rpm_assert_unblock(display);
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
*/
- disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);
+ disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
+ gen8_de_irq_handler(display, disp_ctl);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1269,114 +1599,116 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (display->irq.vblank_enabled++ == 0)
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (--display->irq.vblank_enabled == 0)
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_enable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1384,21 +1716,21 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_disable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_display *display = to_intel_display(intel_crtc);
enum port port;
if (!(intel_crtc->mode_flags &
@@ -1411,52 +1743,43 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
- enable ? 0 : DSI_TE_EVENT);
+ intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
return true;
}
-static void intel_display_vblank_dc_work(struct work_struct *work)
+static void intel_display_vblank_notify_work(struct work_struct *work)
{
struct intel_display *display =
- container_of(work, typeof(*display), irq.vblank_dc_work);
- int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+ container_of(work, typeof(*display), irq.vblank_notify_work);
+ int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
- /*
- * NOTE: intel_display_power_set_target_dc_state is used only by PSR
- * code for DC3CO handling. DC3CO target state is currently disabled in
- * PSR code. If DC3CO is taken into use we need take that into account
- * here as well.
- */
- intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
- DC_STATE_EN_UPTO_DC6);
+ intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, true))
return 0;
- if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
+ schedule_work(&display->irq.vblank_notify_work);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1466,201 +1789,407 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, false))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
+
+ if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
+ schedule_work(&display->irq.vblank_notify_work);
+}
+
+static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return SPRITEB_INVALID_GTT_STATUS |
+ SPRITEA_INVALID_GTT_STATUS |
+ PLANEA_INVALID_GTT_STATUS |
+ CURSORA_INVALID_GTT_STATUS;
+ case PIPE_B:
+ return SPRITED_INVALID_GTT_STATUS |
+ SPRITEC_INVALID_GTT_STATUS |
+ PLANEB_INVALID_GTT_STATUS |
+ CURSORB_INVALID_GTT_STATUS;
+ case PIPE_C:
+ return SPRITEF_INVALID_GTT_STATUS |
+ SPRITEE_INVALID_GTT_STATUS |
+ PLANEC_INVALID_GTT_STATUS |
+ CURSORC_INVALID_GTT_STATUS;
+ default:
+ return 0;
+ }
+}
+
+static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
+ { .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
+{
+ u32 status, enable, tmp;
+
+ tmp = intel_de_read(display, DPINVGTT);
+
+ enable = tmp >> 16;
+ status = tmp & 0xffff;
- if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ /*
+ * Despite what the docs claim, the status bits seem to get
+ * stuck permanently (similar to the old PGTBL_ER register), so
+ * we have to disable and ignore them once set. They do get
+ * reset if the display power well goes down, so no need to
+ * track the enable mask explicitly.
+ */
+ *dpinvgtt = status & enable;
+ enable &= ~status;
+
+ /* customary ack+disable then re-enable to guarantee an edge */
+ intel_de_write(display, DPINVGTT, status);
+ intel_de_write(display, DPINVGTT, enable << 16);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ enum pipe pipe;
- if (IS_CHERRYVIEW(dev_priv))
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ for_each_pipe(display, pipe) {
+ u32 fault_errors;
+
+ fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
+ pipe, fault_errors);
+ }
+}
+
+void vlv_display_error_irq_ack(struct intel_display *display,
+ u32 *eir, u32 *dpinvgtt)
+{
+ u32 emr;
+
+ *eir = intel_de_read(display, VLV_EIR);
+
+ if (*eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_ack(display, dpinvgtt);
+
+ intel_de_write(display, VLV_EIR, *eir);
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits.
+ */
+ emr = intel_de_read(display, VLV_EMR);
+ intel_de_write(display, VLV_EMR, 0xffffffff);
+ intel_de_write(display, VLV_EMR, emr);
+}
+
+void vlv_display_error_irq_handler(struct intel_display *display,
+ u32 eir, u32 dpinvgtt)
+{
+ drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);
+
+ if (eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_handler(display, dpinvgtt);
+}
+
+static void _vlv_display_irq_reset(struct intel_display *display)
+{
+ if (display->platform.cherryview)
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ gen2_error_reset(to_intel_uncore(display->drm),
+ VLV_ERROR_REGS);
- i9xx_pipestat_irq_reset(dev_priv);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- gen2_irq_reset(uncore, VLV_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ i9xx_pipestat_irq_reset(display);
+
+ intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
+ display->irq.vlv_imr_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_uncore_rmw(&i915->uncore,
- PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
+}
+
+u32 i9xx_display_irq_enable_mask(struct intel_display *display)
+{
+ u32 enable_mask;
+
+ enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ if (DISPLAY_VER(display) >= 3)
+ enable_mask |= I915_ASLE_INTERRUPT;
+
+ if (HAS_HOTPLUG(display))
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+
+ return enable_mask;
+}
+
+void i915_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+void i965_display_irq_postinstall(struct intel_display *display)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
+}
+static u32 vlv_error_mask(void)
+{
+ /* TODO enable other errors too? */
+ return VLV_ERROR_PAGE_TABLE;
+}
+
+static void _vlv_display_irq_postinstall(struct intel_display *display)
+{
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ if (display->platform.cherryview)
+ intel_de_write(display, DPINVGTT,
+ DPINVGTT_STATUS_MASK_CHV |
+ DPINVGTT_EN_MASK_CHV);
+ else
+ intel_de_write(display, DPINVGTT,
+ DPINVGTT_STATUS_MASK_VLV |
+ DPINVGTT_EN_MASK_VLV);
+
+ gen2_error_init(to_intel_uncore(display->drm),
+ VLV_ERROR_REGS, ~vlv_error_mask());
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT;
+ I915_LPE_PIPE_B_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);
- dev_priv->irq_mask = ~enable_mask;
+ display->irq.vlv_imr_mask = ~enable_mask;
- gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
+{
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_postinstall(display);
+ spin_unlock_irq(&display->irq.lock);
+}
+
+static void ibx_display_irq_reset(struct intel_display *display)
+{
+ if (HAS_PCH_NOP(display))
+ return;
+
+ gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
+
+ if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
+ intel_de_write(display, SERR_INT, 0xffffffff);
+}
+
+void ilk_display_irq_reset(struct intel_display *display)
+{
+ struct intel_uncore *uncore = to_intel_uncore(display->drm);
+
+ gen2_irq_reset(uncore, DE_IRQ_REGS);
+ display->irq.ilk_de_imr_mask = ~0u;
+
+ if (DISPLAY_VER(display) == 7)
+ intel_de_write(display, GEN7_ERR_INT, 0xffffffff);
+
+ if (display->platform.haswell) {
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
+ }
+
+ ibx_display_irq_reset(display);
+}
+
+void gen8_display_irq_reset(struct intel_display *display)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
- if (intel_display_power_is_enabled(dev_priv,
+ for_each_pipe(display, pipe)
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
+
+ if (HAS_PCH_SPLIT(display))
+ ibx_display_irq_reset(display);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
- if (!intel_display_power_is_enabled(dev_priv, domain))
+ if (!intel_display_power_is_enabled(display, domain))
continue;
- intel_uncore_write(uncore,
- TRANS_PSR_IMR(dev_priv, trans),
- 0xffffffff);
- intel_uncore_write(uncore,
- TRANS_PSR_IIR(dev_priv, trans),
- 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IMR(display, trans),
+ 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IIR(display, trans),
+ 0xffffffff);
}
} else {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
- if (intel_display_power_is_enabled(dev_priv,
+ for_each_pipe(display, pipe)
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
- gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
+ if (DISPLAY_VER(display) >= 14)
+ intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
- gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ for_each_pipe_masked(display, pipe, pipe_mask)
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ display->irq.de_pipe_imr_mask[pipe],
+ ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ for_each_pipe_masked(display, pipe, pipe_mask)
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1677,58 +2206,65 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+ if (HAS_PCH_NOP(display))
return;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+ else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
else
mask = SDE_GMBUS_CPT;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ _vlv_display_irq_postinstall(display);
}
+
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (!display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_uncore *uncore = &i915->uncore;
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -1738,7 +2274,8 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
+ DE_PCH_EVENT | DE_GTT_FAULT |
DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
@@ -1748,60 +2285,57 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ if (display->platform.haswell) {
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
- i915->irq_mask = ~display_mask;
+ display->irq.ilk_de_imr_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
- gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
- display_mask | extra_mask);
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
+ display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_postinstall(display);
+ else if (HAS_PCH_SPLIT(display))
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -1811,110 +2345,142 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
+ /* TODO figure PIPEDMC interrupts for pre-LNL */
+ if (DISPLAY_VER(display) >= 20)
+ de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
- if (!intel_display_power_is_enabled(dev_priv, domain))
+ if (!intel_display_power_is_enabled(display, domain))
continue;
- gen2_assert_iir_is_zero(uncore,
- TRANS_PSR_IIR(dev_priv, trans));
+ intel_display_irq_regs_assert_irr_is_zero(display,
+ TRANS_PSR_IIR(display, trans));
}
} else {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked;
- if (intel_display_power_is_enabled(dev_priv,
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- de_pipe_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ display->irq.de_pipe_imr_mask[pipe],
+ de_pipe_enables);
}
- gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
- gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
+ intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
+ de_port_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
+ de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
+ de_hpd_enables);
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_uncore *uncore = &i915->uncore;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
XELPDP_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
+ de_hpd_enables);
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask = SDE_GMBUS_ICP;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
- intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ gen8_de_irq_postinstall(display);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+}
+
+void intel_display_irq_init(struct intel_display *display)
+{
+ spin_lock_init(&display->irq.lock);
+
+ display->drm->vblank_disable_immediate = true;
+
+ intel_hotplug_irq_init(display);
+
+ INIT_WORK(&display->irq.vblank_notify_work,
+ intel_display_vblank_notify_work);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+struct intel_display_irq_snapshot {
+ u32 derrmr;
+};
+
+struct intel_display_irq_snapshot *
+intel_display_irq_snapshot_capture(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ struct intel_display_irq_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
- intel_hotplug_irq_init(i915);
+ if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
+ snapshot->derrmr = intel_de_read(display, DERRMR);
+
+ return snapshot;
+}
+
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
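/*
 * Illustrative sketch (not from the patch itself): how the new snapshot
 * capture/print pair above might be paired in an error-capture path.  The
 * dump_display_error() wrapper and its drm_printer are hypothetical; only
 * intel_display_irq_snapshot_capture(), intel_display_irq_snapshot_print()
 * and the kfree() matching the kzalloc() above are taken from the code shown.
 */
static void dump_display_error(struct intel_display *display,
			       struct drm_printer *p)
{
	struct intel_display_irq_snapshot *snapshot;

	/* capture uses GFP_ATOMIC, so this is usable from atomic-ish paths */
	snapshot = intel_display_irq_snapshot_capture(display);

	/* a NULL snapshot is handled gracefully by the print helper */
	intel_display_irq_snapshot_print(snapshot, p);

	kfree(snapshot);
}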
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index b077712b7be1..84acd31948cf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -11,28 +11,30 @@
#include "intel_display_limits.h"
enum pipe;
-struct drm_i915_private;
struct drm_crtc;
+struct drm_printer;
+struct intel_display;
+struct intel_display_irq_snapshot;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -45,38 +47,48 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier);
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier);
+bool ilk_display_irq_handler(struct intel_display *display);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void ilk_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+u32 i9xx_display_irq_enable_mask(struct intel_display *display);
+void i915_display_irq_postinstall(struct intel_display *display);
+void i965_display_irq_postinstall(struct intel_display *display);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
-u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
+void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void intel_display_irq_init(struct intel_display *display);
+
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
+
+struct intel_display_irq_snapshot *intel_display_irq_snapshot_capture(struct intel_display *display);
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, struct drm_printer *p);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_jiffies.h b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
new file mode 100644
index 000000000000..c060c567e262
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_JIFFIES_H__
+#define __INTEL_DISPLAY_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
+#endif /* __INTEL_DISPLAY_JIFFIES_H__ */
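/*
 * Illustrative sketch (not from the patch itself): the usage pattern
 * described in the comment above wait_remaining_ms_from_jiffies().  The
 * panel_power_on()/panel_enable_backlight() helpers and the 200 ms figure
 * are made up purely for illustration.
 */
static void example_panel_power_sequence(void)
{
	unsigned long power_on_time;

	panel_power_on();		/* event A */
	power_on_time = jiffies;	/* record when A happened */

	/* ... unrelated setup work may happen here ... */

	/* just before event B, sleep for whatever is left of the 200 ms */
	wait_remaining_ms_from_jiffies(power_on_time, 200);

	panel_enable_backlight();	/* event B */
}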
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index f0fa27e365ab..cb3c9c665c44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -138,4 +138,13 @@ enum hpd_pin {
HPD_NUM_PINS
};
+enum intel_color_block {
+ INTEL_PLANE_CB_PRE_CSC_LUT,
+ INTEL_PLANE_CB_CSC,
+ INTEL_PLANE_CB_POST_CSC_LUT,
+ INTEL_PLANE_CB_3DLUT,
+
+ INTEL_CB_MAX
+};
+
#endif /* __INTEL_DISPLAY_LIMITS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index dc666aefa362..2aed110c5b09 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -3,8 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+
+#include <drm/drm_print.h>
+
#include "intel_display_params.h"
-#include "i915_drv.h"
#define intel_display_param_named(name, T, perm, desc) \
module_param_named(name, intel_display_modparams.name, T, perm); \
@@ -57,6 +62,9 @@ intel_display_param_named_unsafe(enable_dpt, bool, 0400,
intel_display_param_named_unsafe(enable_dsb, bool, 0400,
"Enable display state buffer (DSB) (default: true)");
+intel_display_param_named_unsafe(enable_flipq, bool, 0400,
+ "Enable DMC flip queue (default: false)");
+
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
@@ -112,6 +120,9 @@ intel_display_param_named_unsafe(enable_psr, int, 0400,
"(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
"Default: -1 (use per-chip default)");
+intel_display_param_named_unsafe(enable_panel_replay, int, 0400,
+ "Enable Panel Replay (0=disabled, 1=enabled). Default: -1 (use per-chip default)");
+
intel_display_param_named(psr_safest_params, bool, 0400,
"Replace PSR VBT parameters by the safest and not optimal ones. This "
"is helpful to detect if PSR issues are related to bad values set in "
@@ -125,7 +136,7 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "(-1=use per-chip default, 0=disabled, 1=enabled, 2=match any register, 3=always locked) "
"Default: -1");
__maybe_unused
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 5317138e6044..b01bc5700c52 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -31,6 +31,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(bool, enable_dpt, true, 0400) \
param(bool, enable_dsb, true, 0600) \
+ param(bool, enable_flipq, false, 0600) \
param(bool, enable_sagv, true, 0600) \
param(int, disable_power_well, -1, 0400) \
param(bool, enable_ips, true, 0600) \
@@ -45,6 +46,7 @@ struct drm_printer;
param(bool, enable_dp_mst, true, 0600) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(int, enable_panel_replay, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, enable_dmc_wl, -1, 0400) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 34465d56def0..2a4cc1dcc293 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,13 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -16,7 +21,10 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
@@ -204,7 +212,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -224,7 +232,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
/**
* intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to check
*
* This function can be used to check the hw power domain state. It is mostly
@@ -239,10 +247,9 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
* Returns:
* True when the power domain is enabled, false otherwise.
*/
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
bool ret;
@@ -323,6 +330,35 @@ unlock:
mutex_unlock(&power_domains->lock);
}
+/**
+ * intel_display_power_get_current_dc_state - get the current DC state
+ * @display: display device
+ *
+ * Look up the "DC off" power well and report the DC state currently in
+ * effect: DC_STATE_DISABLE while that power well is enabled, otherwise the
+ * target DC state it will allow once disabled.
+ */
+u32 intel_display_power_get_current_dc_state(struct intel_display *display)
+{
+ struct i915_power_well *power_well;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u32 current_dc_state = DC_STATE_DISABLE;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(display->drm, !power_well))
+ goto unlock;
+
+ current_dc_state = intel_power_well_is_enabled(display, power_well) ?
+ DC_STATE_DISABLE : power_domains->target_dc_state;
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+
+ return current_dc_state;
+}
+
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
@@ -456,7 +492,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -474,8 +509,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -500,7 +535,7 @@ __intel_display_power_get_domain(struct intel_display *display,
/**
* intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
@@ -510,12 +545,13 @@ __intel_display_power_get_domain(struct intel_display *display,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
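/*
 * Illustrative sketch (not from the patch itself): the symmetric get/put
 * contract documented above, using the new struct intel_display based
 * prototypes.  program_some_registers() is a placeholder for whatever work
 * needs the domain powered.
 */
static void example_with_power_domain(struct intel_display *display)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);

	program_some_registers(display);

	/* every intel_display_power_get() needs a matching put */
	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
}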
@@ -526,7 +562,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
/**
* intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
@@ -537,15 +573,14 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
* call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
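/*
 * Illustrative sketch (not from the patch itself): the conditional pattern
 * intel_display_power_get_if_enabled() is meant for, per its kerneldoc
 * above - touch the hardware only if the domain is already powered, and
 * skip quietly otherwise.  read_some_hw_state() is a placeholder.
 */
static void example_read_if_powered(struct intel_display *display)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_PIPE(PIPE_A));
	if (!wakeref)
		return;	/* domain is off; nothing to read */

	read_some_hw_state(display);

	intel_display_power_put(display, POWER_DOMAIN_PIPE(PIPE_A), wakeref);
}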
@@ -561,7 +596,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -624,12 +659,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -637,7 +670,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -645,11 +678,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -689,14 +721,14 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
* __intel_display_power_put_async - release a power domain reference asynchronously
- * @i915: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
* @delay_ms: delay of powering down the power domain
@@ -707,15 +739,15 @@ out_verify:
* The power down is delayed by @delay_ms if this is >= 0, or by a default
* 100 ms otherwise.
*/
-void __intel_display_power_put_async(struct drm_i915_private *i915,
+void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -747,14 +779,14 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
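/*
 * Illustrative sketch (not from the patch itself): releasing a reference
 * asynchronously, as described in the kerneldoc above.  Passing a negative
 * delay_ms falls back to the default 100 ms before the domain is actually
 * powered down.
 */
static void example_put_async(struct intel_display *display,
			      enum intel_display_power_domain domain,
			      intel_wakeref_t wakeref)
{
	/* queue the power-down instead of doing it synchronously */
	__intel_display_power_put_async(display, domain, wakeref, -1);
}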
/**
* intel_display_power_flush_work - flushes the async display power disabling work
- * @i915: i915 device instance
+ * @display: display device instance
*
* Flushes any pending work that was scheduled by a preceding
* intel_display_power_put_async() call, completing the disabling of the
@@ -764,9 +796,8 @@ out_verify:
* function returns; to ensure that the work handler isn't running use
* intel_display_power_flush_work_sync() instead.
*/
-void intel_display_power_flush_work(struct drm_i915_private *i915)
+void intel_display_power_flush_work(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -787,7 +818,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
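/*
 * Illustrative sketch (not from the patch itself): flushing any pending
 * asynchronous put before suspend or driver removal, as described in the
 * kerneldoc above.  Note the caveat there - this does not guarantee the work
 * handler has stopped running; intel_display_power_flush_work_sync() is the
 * variant for that.
 */
static void example_quiesce_power(struct intel_display *display)
{
	intel_display_power_flush_work(display);
}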
/**
@@ -800,10 +831,9 @@ out_verify:
static void
intel_display_power_flush_work_sync(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
@@ -814,7 +844,7 @@ intel_display_power_flush_work_sync(struct intel_display *display)
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
* intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
*
@@ -822,19 +852,17 @@ intel_display_power_flush_work_sync(struct intel_display *display)
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct intel_display *display = &dev_priv->display;
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
* intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
@@ -842,30 +870,27 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
* block right away if this is the last reference.
*
* This function is only for the power domain code's internal use to suppress wakeref
- * tracking when the correspondig debug kconfig option is disabled, should not
+ * tracking when the corresponding debug kconfig option is disabled, should not
* be used otherwise.
*/
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
void
-intel_display_power_get_in_set(struct drm_i915_private *i915,
+intel_display_power_get_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &i915->display;
intel_wakeref_t __maybe_unused wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
- wf = intel_display_power_get(i915, domain);
+ wf = intel_display_power_get(display, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
@@ -873,16 +898,15 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
}
bool
-intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+intel_display_power_get_in_set_if_enabled(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &i915->display;
intel_wakeref_t wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
- wf = intel_display_power_get_if_enabled(i915, domain);
+ wf = intel_display_power_get_if_enabled(display, domain);
if (!wf)
return false;
@@ -895,11 +919,10 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
}
void
-intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+intel_display_power_put_mask_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
- struct intel_display *display = &i915->display;
enum intel_display_power_domain domain;
drm_WARN_ON(display->drm,
@@ -911,7 +934,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
- intel_display_power_put(i915, domain, wf);
+ intel_display_power_put(display, domain, wf);
clear_bit(domain, power_domain_set->mask.bits);
}
}
@@ -999,7 +1022,7 @@ static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
* intel_power_domains_init - initializes the power domain structures
* @display: display device instance
*
- * Initializes the power domain structures for @dev_priv depending upon the
+ * Initializes the power domain structures for @display depending upon the
* supported platform.
*/
int intel_power_domains_init(struct intel_display *display)
@@ -1061,10 +1084,9 @@ static void gen9_dbuf_slice_set(struct intel_display *display,
slice, str_enable_disable(enable));
}
-void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void gen9_dbuf_slices_update(struct intel_display *display,
u8 req_slices)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
enum dbuf_slice slice;
@@ -1095,40 +1117,34 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 slices_mask;
- display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display);
slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
if (DISPLAY_VER(display) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+ intel_pmdemand_program_dbuf(display, slices_mask);
/*
* Just power up at least 1 slice, we will
* figure out later which slices we have and what we need.
*/
- gen9_dbuf_slices_update(dev_priv, slices_mask);
+ gen9_dbuf_slices_update(display, slices_mask);
}
static void gen9_dbuf_disable(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- gen9_dbuf_slices_update(dev_priv, 0);
+ gen9_dbuf_slices_update(display, 0);
if (DISPLAY_VER(display) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, 0);
+ intel_pmdemand_program_dbuf(display, 0);
}
static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (display->platform.alderlake_p)
- return;
-
for_each_dbuf_slice(display, slice)
intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
@@ -1160,7 +1176,7 @@ static void icl_mbus_init(struct intel_display *display)
if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
- for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
+ for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}
@@ -1245,10 +1261,8 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.haswell) {
- if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1268,6 +1282,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
+ int ret;
assert_can_disable_lcpll(display);
@@ -1277,8 +1292,9 @@ static void hsw_disable_lcpll(struct intel_display *display,
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
val = intel_de_read(display, LCPLL_CTL);
@@ -1288,7 +1304,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
intel_de_write(display, LCPLL_CTL, val);
intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(display);
@@ -1296,8 +1312,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(display) &
- D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ ret = poll_timeout_us(val = hsw_read_dcomp(display),
+ (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 100, 1000, false);
+ if (ret)
drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -1314,6 +1332,7 @@ static void hsw_restore_lcpll(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
+ int ret;
val = intel_de_read(display, LCPLL_CTL);
@@ -1342,14 +1361,15 @@ static void hsw_restore_lcpll(struct intel_display *display)
val &= ~LCPLL_PLL_DISABLE;
intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -1385,26 +1405,24 @@ static void hsw_restore_lcpll(struct intel_display *display)
*/
static void hsw_enable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
drm_dbg_kms(display->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv))
+ if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1418,6 +1436,9 @@ static void intel_pch_reset_handshake(struct intel_display *display,
i915_reg_t reg;
u32 reset_bits;
+ if (DISPLAY_VER(display) >= 35)
+ return;
+
if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
@@ -1435,14 +1456,13 @@ static void intel_pch_reset_handshake(struct intel_display *display,
static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1598,9 +1618,7 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
static void tgl_bw_buddy_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum intel_dram_type type = dev_priv->dram_info.type;
- u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct buddy_page_mask *table;
unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
@@ -1617,18 +1635,18 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table = tgl_buddy_page_masks;
for (config = 0; table[config].page_mask != 0; config++)
- if (table[config].num_channels == num_channels &&
- table[config].type == type)
+ if (table[config].num_channels == dram_info->num_channels &&
+ table[config].type == dram_info->type)
break;
if (table[config].page_mask == 0) {
drm_dbg_kms(display->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
- for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
+ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
intel_de_write(display, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
- for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
+ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
@@ -1644,26 +1662,25 @@ static void tgl_bw_buddy_init(struct intel_display *display)
static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
/* 2. Initialize all combo phys */
- intel_combo_phy_init(dev_priv);
+ intel_combo_phy_init(display);
/*
* 3. Enable Power Well 1 (PG1).
@@ -1681,7 +1698,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(display);
- if (DISPLAY_VER(display) >= 12)
+ if (DISPLAY_VER(display) == 12 || display->platform.dg2)
gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
@@ -1696,7 +1713,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 8. Ensure PHYs have completed calibration and adaptation */
if (display->platform.dg2)
- intel_snps_phy_wait_for_calibration(dev_priv);
+ intel_snps_phy_wait_for_calibration(display);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
if (DISPLAY_VERx100(display) == 1401)
@@ -1726,7 +1743,6 @@ static void icl_display_core_init(struct intel_display *display,
static void icl_display_core_uninit(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1736,7 +1752,7 @@ static void icl_display_core_uninit(struct intel_display *display)
gen9_disable_dc_states(display);
intel_dmc_disable_program(display);
- /* 1. Disable all display engine functions -> aready done */
+ /* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
gen9_dbuf_disable(display);
@@ -1759,7 +1775,7 @@ static void icl_display_core_uninit(struct intel_display *display)
mutex_unlock(&power_domains->lock);
/* 5. */
- intel_combo_phy_uninit(dev_priv);
+ intel_combo_phy_uninit(display);
}
static void chv_phy_control_init(struct intel_display *display)
@@ -1879,12 +1895,11 @@ static void vlv_cmnlane_wa(struct intel_display *display)
static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
- vlv_punit_get(dev_priv);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- vlv_punit_put(dev_priv);
+ vlv_punit_get(display->drm);
+ ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(display->drm);
return ret;
}
@@ -1929,7 +1944,6 @@ static void intel_power_domains_verify_state(struct intel_display *display);
*/
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
@@ -1953,9 +1967,9 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
assert_isp_power_gated(display);
} else if (display->platform.broadwell || display->platform.haswell) {
hsw_assert_cdclk(display);
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
} else if (display->platform.ivybridge) {
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
}
/*
@@ -1966,12 +1980,12 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
if (!display->params.disable_power_well) {
drm_WARN_ON(display->drm, power_domains->disable_wakeref);
- display->power.domains.disable_wakeref = intel_display_power_get(i915,
+ display->power.domains.disable_wakeref = intel_display_power_get(display,
POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(display);
@@ -1992,13 +2006,12 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!display->params.disable_power_well)
- intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work_sync(display);
@@ -2006,7 +2019,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -2054,11 +2067,10 @@ void intel_power_domains_sanitize_state(struct intel_display *display)
*/
void intel_power_domains_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(display);
}
@@ -2071,12 +2083,11 @@ void intel_power_domains_enable(struct intel_display *display)
*/
void intel_power_domains_disable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(display);
}
@@ -2094,12 +2105,11 @@ void intel_power_domains_disable(struct intel_display *display)
*/
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@@ -2110,7 +2120,7 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
*/
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
intel_dmc_has_payload(display)) {
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
intel_power_domains_verify_state(display);
return;
}
@@ -2120,10 +2130,10 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
* power wells if power domains must be deinitialized for suspend.
*/
if (!display->params.disable_power_well)
- intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
intel_power_domains_verify_state(display);
if (DISPLAY_VER(display) >= 11)
@@ -2148,7 +2158,6 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
*/
void intel_power_domains_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
if (power_domains->display_core_suspended) {
@@ -2157,10 +2166,8 @@ void intel_power_domains_resume(struct intel_display *display)
} else {
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
}
-
- intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2255,8 +2262,6 @@ static void intel_power_domains_verify_state(struct intel_display *display)
void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
intel_power_domains_suspend(display, s2idle);
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
@@ -2267,14 +2272,12 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
display->platform.broxton) {
gen9_sanitize_dc_state(display);
@@ -2284,8 +2287,8 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
}
@@ -2327,14 +2330,16 @@ void intel_display_power_resume(struct intel_display *display)
}
}
-void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
+void intel_display_power_debug(struct intel_display *display, struct seq_file *m)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
int i;
mutex_lock(&power_domains->lock);
+ seq_printf(m, "Runtime power status: %s\n",
+ str_enabled_disabled(!power_domains->init_wakeref));
+
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
@@ -2510,9 +2515,8 @@ intel_port_domains_for_port(struct intel_display *display, enum port port)
}
enum intel_display_power_domain
-intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
+intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
@@ -2522,9 +2526,8 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
}
enum intel_display_power_domain
-intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
+intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
@@ -2549,9 +2552,8 @@ intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
}
enum intel_display_power_domain
-intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
@@ -2561,9 +2563,8 @@ intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux
}
enum intel_display_power_domain
-intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
@@ -2573,9 +2574,8 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
}
enum intel_display_power_domain
-intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 7b294eec4431..f8813b0e16df 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -13,7 +13,6 @@
enum aux_ch;
enum port;
-struct drm_i915_private;
struct i915_power_well;
struct intel_display;
struct intel_encoder;
@@ -118,12 +117,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
};
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE(pipe) \
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_A))
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_PANEL_FITTER_A))
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
+ (enum intel_display_power_domain)((tran) - TRANSCODER_A + POWER_DOMAIN_TRANSCODER_A))
struct intel_power_domain_mask {
DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
@@ -183,103 +183,104 @@ void intel_display_power_suspend(struct intel_display *display);
void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
+u32 intel_display_power_get_current_dc_state(struct intel_display *display);
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain);
intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
-void __intel_display_power_put_async(struct drm_i915_private *i915,
+void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms);
-void intel_display_power_flush_work(struct drm_i915_private *i915);
+void intel_display_power_flush_work(struct intel_display *display);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
+intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, wakeref, -1);
+ __intel_display_power_put_async(display, domain, wakeref, -1);
}
static inline void
-intel_display_power_put_async_delay(struct drm_i915_private *i915,
+intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- __intel_display_power_put_async(i915, domain, wakeref, delay_ms);
+ __intel_display_power_put_async(display, domain, wakeref, delay_ms);
}
#else
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain);
static inline void
-intel_display_power_put(struct drm_i915_private *i915,
+intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- intel_display_power_put_unchecked(i915, domain);
+ intel_display_power_put_unchecked(display, domain);
}
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
+intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, -1);
+ __intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, -1);
}
static inline void
-intel_display_power_put_async_delay(struct drm_i915_private *i915,
+intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, delay_ms);
+ __intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, delay_ms);
}
#endif
void
-intel_display_power_get_in_set(struct drm_i915_private *i915,
+intel_display_power_get_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain);
bool
-intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+intel_display_power_get_in_set_if_enabled(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain);
void
-intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+intel_display_power_put_mask_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask);
static inline void
-intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+intel_display_power_put_all_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set)
{
- intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
+ intel_display_power_put_mask_in_set(display, power_domain_set, &power_domain_set->mask);
}
-void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+void intel_display_power_debug(struct intel_display *display, struct seq_file *m);
enum intel_display_power_domain
-intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port);
enum intel_display_power_domain
-intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+intel_display_power_ddi_io_domain(struct intel_display *display, enum port port);
enum intel_display_power_domain
-intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch);
enum intel_display_power_domain
-intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch);
enum intel_display_power_domain
-intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch);
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
@@ -293,15 +294,15 @@ enum dbuf_slice {
I915_MAX_DBUF_SLICES
};
-void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void gen9_dbuf_slices_update(struct intel_display *display,
u8 req_slices);
-#define with_intel_display_power(i915, domain, wf) \
- for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
+#define with_intel_display_power(display, domain, wf) \
+ for ((wf) = intel_display_power_get((display), (domain)); (wf); \
+ intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
-#define with_intel_display_power_if_enabled(i915, domain, wf) \
- for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
+#define with_intel_display_power_if_enabled(display, domain, wf) \
+ for ((wf) = intel_display_power_get_if_enabled((display), (domain)); (wf); \
+ intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index fb2df5c382d8..9b49952994ce 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,14 +3,14 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
-
-#include "vlv_sideband_reg.h"
+#include <drm/drm_print.h>
+#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "vlv_iosf_sb_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
@@ -1516,7 +1516,11 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc xelpdp_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1534,6 +1538,7 @@ static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
I915_DECL_PW_DOMAINS(xe2lpd_pwdoms_pica_tc,
@@ -1584,6 +1589,7 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
/*
@@ -1677,16 +1683,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
- .instances = &I915_PW_INSTANCES(
- I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
- I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
- I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
- I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
- I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
- I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
- ),
- .ops = &xelpdp_aux_power_well_ops,
},
};
@@ -1696,6 +1692,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
@@ -1714,6 +1711,65 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xe3lpd_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
+};
+
+static const struct i915_power_well_desc wcl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xe3lpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xe3lpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xe3lpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc wcl_power_wells_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list wcl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
+ I915_PW_DESCRIPTORS(wcl_power_wells_main),
+ I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(wcl_power_wells_aux),
};
static void init_power_well_domains(const struct i915_power_well_instance *inst,
@@ -1823,7 +1879,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(display) >= 30)
+ if (DISPLAY_VERx100(display) == 3002)
+ return set_power_wells(power_domains, wcl_power_wells);
+ else if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index f45a4f9ba23c..f4f7e73acc87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -3,6 +3,10 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -13,6 +17,8 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -24,12 +30,25 @@
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
+#include "vlv_iosf_sb_reg.h"
#include "vlv_sideband.h"
-#include "vlv_sideband_reg.h"
+
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ *
+ * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
+ */
+static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
+{
+ int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
+
+ return pw_idx - pw1_idx + SKL_PG1;
+}
struct i915_power_well_regs {
i915_reg_t bios;
@@ -186,22 +205,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -278,8 +293,8 @@ static void hsw_wait_for_power_well_enable(struct intel_display *display,
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(display, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
+ if (intel_de_wait_for_set_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
@@ -309,8 +324,8 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- bool disabled;
u32 reqs;
+ int ret;
/*
* Bspec doesn't require waiting for PWs to get disabled, but still do
@@ -321,12 +336,18 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(display, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
- if (disabled)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
+ ret = intel_de_wait_for_clear_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
+ if (!ret)
return;
+ /* Refresh requesters in case they popped up during the wait. */
+ if (!reqs)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
@@ -338,8 +359,8 @@ static void gen9_wait_for_power_well_fuses(struct intel_display *display,
{
/* Timeout 5us for PG#0, for other PGs 1us */
drm_WARN_ON(display->drm,
- intel_de_wait_for_set(display, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct intel_display *display,
@@ -351,8 +372,7 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
/* Wa_16013190616:adlp */
if (display->platform.alderlake_p && pg == SKL_PG1)
@@ -376,8 +396,8 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
+
gen9_wait_for_power_well_fuses(display, pg);
}
@@ -483,12 +503,10 @@ static void icl_tc_port_assert_ref_held(struct intel_display *display,
static void icl_tc_cold_exit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
+ ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -499,7 +517,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
msleep(1);
/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
"succeeded");
}
@@ -507,12 +525,13 @@ static void
icl_tc_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
+ u32 val;
+ int ret;
icl_tc_port_assert_ref_held(display, power_well, dig_port);
@@ -539,10 +558,11 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(display->drm,
- "Timeout waiting TC uC health\n");
+ ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
+ val & DKL_CMN_UC_DW27_UC_HEALTH,
+ 100, 1000, false);
+ if (ret)
+ drm_warn(display->drm, "Timeout waiting TC uC health\n");
}
}
@@ -550,10 +570,9 @@ static void
icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return icl_tc_phy_aux_power_well_enable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_enable(display,
@@ -566,10 +585,9 @@ static void
icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return hsw_power_well_disable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_disable(display,
@@ -755,8 +773,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -765,6 +784,9 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
+ if (!power_domains->initializing)
+ intel_psr_notify_dc5_dc6(display);
+
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
@@ -775,11 +797,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
@@ -802,7 +832,6 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -819,9 +848,10 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
- assert_dmc_loaded(display);
+ assert_display_rpm_held(display);
+
+ assert_main_dmc_loaded(display);
}
void gen9_enable_dc5(struct intel_display *display)
@@ -852,7 +882,7 @@ static void assert_can_enable_dc6(struct intel_display *display)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void skl_enable_dc6(struct intel_display *display)
@@ -962,8 +992,7 @@ static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
drm_WARN(display->drm,
@@ -975,7 +1004,6 @@ static void gen9_assert_dbuf_enabled(struct intel_display *display)
void gen9_disable_dc_states(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_cdclk_config cdclk_config = {};
u32 old_state = power_domains->dc_state;
@@ -1015,7 +1043,7 @@ void gen9_disable_dc_states(struct intel_display *display)
* PHY's HW context for port B is lost after DC transitions,
* so we need to restore it manually.
*/
- intel_combo_phy_init(dev_priv);
+ intel_combo_phy_init(display);
}
static void gen9_dc_off_power_well_enable(struct intel_display *display,
@@ -1096,39 +1124,39 @@ static void i830_pipes_power_well_sync_hw(struct intel_display *display,
static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
u32 ctrl;
+ u32 val;
+ int ret;
mask = PUNIT_PWRGT_MASK(pw_idx);
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
PUNIT_PWRGT_PWR_GATE(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
+ val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
+ if ((val & mask) == state)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
+ (val & mask) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
+ vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void vlv_power_well_enable(struct intel_display *display,
@@ -1146,7 +1174,6 @@ static void vlv_power_well_disable(struct intel_display *display,
static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1156,9 +1183,9 @@ static bool vlv_power_well_enabled(struct intel_display *display,
mask = PUNIT_PWRGT_MASK(pw_idx);
ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1172,10 +1199,10 @@ static bool vlv_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
drm_WARN_ON(display->drm, ctrl != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1188,7 +1215,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
@@ -1206,7 +1233,6 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
static void vlv_display_power_well_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1230,9 +1256,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(display);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1241,8 +1265,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1250,7 +1274,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(display);
+ intel_vga_disable(display);
intel_pps_unlock_regs_wa(display);
}
@@ -1259,9 +1283,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1270,7 +1292,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling on accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
@@ -1314,11 +1336,10 @@ static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
for_each_pipe(display, pipe)
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(display, pipe);
/* Assert common reset */
intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);
@@ -1337,6 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display)
u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
+ u32 val;
/*
	 * The BIOS can leave the PHY in some weird state
@@ -1424,12 +1446,11 @@ static void assert_chv_phy_status(struct intel_display *display)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait(display, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10, &val))
drm_err(display->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, display->power.chv_phy_control);
+ val & phy_status_mask, phy_status, display->power.chv_phy_control);
}
#undef BITS_SET
@@ -1437,7 +1458,6 @@ static void assert_chv_phy_status(struct intel_display *display)
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1456,35 +1476,35 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy), 1))
+ if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(display, DISPLAY_PHY_CONTROL,
@@ -1500,7 +1520,6 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
@@ -1510,11 +1529,11 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
+ assert_pll_disabled(display, PIPE_A);
+ assert_pll_disabled(display, PIPE_B);
} else {
phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
+ assert_pll_disabled(display, PIPE_C);
}
display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
@@ -1536,7 +1555,6 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1554,9 +1572,9 @@ static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_ph
else
reg = CHV_CMN_DW6_CH1;
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, phy, reg);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ val = vlv_dpio_read(display->drm, phy, reg);
+ vlv_dpio_put(display->drm);
/*
* This assumes !override is only used when the port is disabled.
@@ -1666,14 +1684,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1686,10 +1703,10 @@ static bool chv_pipe_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
drm_WARN_ON(display->drm, ctrl << 16 != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1698,36 +1715,36 @@ static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
+ int ret;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+ vlv_punit_get(display->drm);
- if (COND)
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
+ if ((ctrl & DP_SSS_MASK(pipe)) == state)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (ctrl & DP_SSS_MASK(pipe)) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_pipe_power_well_sync_hw(struct intel_display *display,
@@ -1756,7 +1773,6 @@ static void chv_pipe_power_well_disable(struct intel_display *display,
static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1773,7 +1789,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
@@ -1789,10 +1805,9 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
}
if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
+ drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
block ? "" : "un");
}
@@ -1834,11 +1849,10 @@ tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
icl_tc_port_assert_ref_held(display, power_well,
aux_ch_to_digital_port(display, aux_ch));
@@ -1852,18 +1866,36 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
* expected to just wait a fixed 600us after raising the request
* bit.
*/
- usleep_range(600, 1200);
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel power to be up\n",
+ phy_name(phy));
+ } else {
+ usleep_range(600, 1200);
+ }
}
static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
- usleep_range(10, 30);
+
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel to powerdown\n",
+ phy_name(phy));
+ } else {
+ usleep_range(10, 30);
+ }
}
static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
@@ -1881,8 +1913,8 @@ static void xe2lpd_pica_power_well_enable(struct intel_display *display,
intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well enable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
@@ -1894,8 +1926,8 @@ static void xe2lpd_pica_power_well_disable(struct intel_display *display,
{
intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well disable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 338379dae44c..ec8e508d0593 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -60,7 +60,7 @@ struct i915_power_well_instance {
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
- * Arbitraty data associated with this power well. Platform and power
+ * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
@@ -77,7 +77,7 @@ struct i915_power_well_instance {
struct {
/*
* request/status flag index in the power well
- * constrol/status registers.
+ * control/status registers.
*/
u8 idx;
} hsw;
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
new file mode 100644
index 000000000000..9d71e26a4fa2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -0,0 +1,2934 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_REGS_H__
+#define __INTEL_DISPLAY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
+
+#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
+
+#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
+#define MIPIO_RST_CTRL (1 << 2)
+
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
+#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
+#define COMMON_RESET_DIS (1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
+
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 _MMIO(0x4F074)
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
+
+#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
+#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
+#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
+#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
+#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
+#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
+#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
+#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
+#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
+
+#define DERRMR _MMIO(0x44050)
+/* Note that HBLANK events are reserved on bdw+ */
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
+
+#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
+ VLV_IER, \
+ VLV_IIR)
+
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
+#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
+#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
+#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
+#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
+#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
+#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
+
+#define IPS_CTL _MMIO(0x43408)
+#define IPS_ENABLE REG_BIT(31)
+#define IPS_FALSE_COLOR REG_BIT(4)
+
+/*
+ * Clock control & power management
+ */
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+
+#define VGA0 _MMIO(0x6000)
+#define VGA1 _MMIO(0x6004)
+#define VGA_PD _MMIO(0x6010)
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
+
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
+#define PHY_LDO_DELAY_0NS 0x0
+#define PHY_LDO_DELAY_200NS 0x1
+#define PHY_LDO_DELAY_600NS 0x2
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
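+/*
+ * Illustrative example of the bit arithmetic above (assuming the usual
+ * DPIO_PHY0/DPIO_CH0 = 0 and DPIO_PHY1/DPIO_CH1 = 1 enum values):
+ * PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1) is bit 2*0 + 1 + 27 = 28,
+ * and PHY_POWERGOOD(DPIO_PHY1) is bit 30.
+ */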
+
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
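+/*
+ * Worked example, restating the comment above: a 65 MHz dotclock gives a
+ * 650 MHz bus rate, below the 1 GHz floor, so the clock is doubled to
+ * 130 MHz (1.3 GHz bus) and the multiplier field is programmed with
+ * 2 - 1 = 1.
+ */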
+
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
+#define _FPA0 0x6040
+#define _FPA1 0x6044
+#define _FPB0 0x6048
+#define _FPB1 0x604c
+#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+
+#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1 << 15)
+
+#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
+#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
+
+#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
+
+/*
+ * Overlay regs
+ */
+#define OVADD _MMIO(0x30000)
+#define DOVSTA _MMIO(0x30008)
+#define OC_BUF (0x3 << 20)
+#define OGAMC5 _MMIO(0x30010)
+#define OGAMC4 _MMIO(0x30014)
+#define OGAMC3 _MMIO(0x30018)
+#define OGAMC2 _MMIO(0x3001c)
+#define OGAMC1 _MMIO(0x30020)
+#define OGAMC0 _MMIO(0x30024)
+
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
+
+#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
+#define DPCE_GATING_DIS REG_BIT(17)
+
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DUPS1_GATING_DIS (1 << 15)
+#define DUPS2_GATING_DIS (1 << 19)
+#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
+/*
+ * Display engine regs
+ */
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define _TRANS_HTOTAL_B 0x61000
+#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+
+#define _TRANS_HBLANK_A 0x60004
+#define _TRANS_HBLANK_B 0x61004
+#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+
+#define _TRANS_HSYNC_A 0x60008
+#define _TRANS_HSYNC_B 0x61008
+#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+
+#define _TRANS_VTOTAL_A 0x6000c
+#define _TRANS_VTOTAL_B 0x6100c
+#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+
+#define _TRANS_VBLANK_A 0x60010
+#define _TRANS_VBLANK_B 0x61010
+#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+
+#define _TRANS_VSYNC_A 0x60014
+#define _TRANS_VSYNC_B 0x61014
+#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+
+#define _PIPEASRC 0x6001c
+#define _PIPEBSRC 0x6101c
+#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
+#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
+#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
+
+#define _BCLRPAT_A 0x60020
+#define _BCLRPAT_B 0x61020
+#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+
+#define _TRANS_MULT_A 0x6002c
+#define _TRANS_MULT_B 0x6102c
+#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
+/* HDMI/DP bits are g4x+ */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
+/* CRT/TV common between gen3+ */
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define _GEN3_SDVOB 0x61140
+#define _GEN3_SDVOC 0x61160
+#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
+#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
+#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
+#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
+#define PCH_SDVOB _MMIO(0xe1140)
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC _MMIO(0xe1150)
+#define PCH_HDMID _MMIO(0xe1160)
+
+#define PORT_DFT_I9XX _MMIO(0x61150)
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL_SHIFT 30
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/*
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA _MMIO(0x61178)
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
+#define VIDEO_DIP_GMP_DATA_SIZE 36
+#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
+#define VIDEO_DIP_CTL _MMIO(0x61170)
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
+
+#define PCH_GTC_CTL _MMIO(0xe7000)
+#define PCH_GTC_ENABLE (1 << 31)
+
+/* Display Port */
+#define DP_A _MMIO(0x64000) /* eDP */
+#define DP_B _MMIO(0x64100)
+#define DP_C _MMIO(0x64200)
+#define DP_D _MMIO(0x64300)
+#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
+#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
+#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define TU_SIZE_MASK REG_GENMASK(30, 25)
+#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
+#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
+#define DATA_LINK_N_MAX (0x800000)
+
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
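+/*
+ * Worked example (illustrative numbers only): a 148.5 MHz dotclock at
+ * 3 bytes per pixel over 4 lanes, with ls_clk taken as the 270 MHz link
+ * symbol clock, gives data M/N = 148500 * 3 / (270000 * 4) =
+ * 445500 / 1080000 = 0.4125; both values fit under DATA_LINK_N_MAX.
+ */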
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
+#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
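+/*
+ * Worked example (same illustrative mode as above): link M/N =
+ * pixel_clock / ls_clk = 148500 / 270000 = 0.55.
+ */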
+
+/* Pipe A */
+#define _PIPEADSL 0x70000
+#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
+#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANSACONF 0x70008
+#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
+/*
+ * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
+ * DBL=power saving pixel doubling, PF-ID* requires panel fitter
+ */
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
+#define _PIPEASTAT 0x70024
+#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
+
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
+#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
+#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
+#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
+#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+/*
+ * For Display < 13, bits 5-7 of PIPE_MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of:
+ * 6, 8, 10, 12 BPC.
+ */
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
+
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
+#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+
+#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
+
+#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
+
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ * PIPE_FRAME_LOW_SHIFT);
+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH 0x70040
+#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+
+#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_G4X 0x70040
+#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+
+#define _PIPEA_FLIPCOUNT_G4X 0x70044
+#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
+
+/* CHV pipe B blender */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_BLEND_MASK REG_GENMASK(31, 30)
+#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
+#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
+#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
+
+#define _CHV_CANVAS_A 0x60a04
+#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
+#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
+#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
+
+/*
+ * VBIOS flags
+ * gen2:
+ * [00:06] alm,mgm
+ * [10:16] all
+ * [30:32] alm,mgm
+ * gen3+:
+ * [00:0f] all
+ * [10:1f] all
+ * [30:32] all
+ */
+#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
+#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
+#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
+#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL _MMIO(0x45300)
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define _PIPEA_DATA_M1 0x60030
+#define _PIPEB_DATA_M1 0x61030
+#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+
+#define _PIPEA_DATA_N1 0x60034
+#define _PIPEB_DATA_N1 0x61034
+#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+
+#define _PIPEA_DATA_M2 0x60038
+#define _PIPEB_DATA_M2 0x61038
+#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+
+#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEB_DATA_N2 0x6103c
+#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+
+#define _PIPEA_LINK_M1 0x60040
+#define _PIPEB_LINK_M1 0x61040
+#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+
+#define _PIPEA_LINK_N1 0x60044
+#define _PIPEB_LINK_N1 0x61044
+#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+
+#define _PIPEA_LINK_M2 0x60048
+#define _PIPEB_LINK_M2 0x61048
+#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+
+#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEB_LINK_N2 0x6104c
+#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
+
+/*
+ * Skylake scalers
+ */
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
+#define _PS_1A_CTRL 0x68180
+#define _PS_2A_CTRL 0x68280
+#define _PS_1B_CTRL 0x68980
+#define _PS_2B_CTRL 0x68A80
+#define _PS_1C_CTRL 0x69180
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+#define PS_SCALER_EN REG_BIT(31)
+#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
+#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
+#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
+#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
+#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
+#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
+#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
+#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
+#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
+#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
+#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
+#define PS_BINDING_MASK REG_GENMASK(27, 25)
+#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
+#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
+#define PS_FILTER_MASK REG_GENMASK(24, 23)
+#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
+#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
+#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
+#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
+#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
+#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
+#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
+#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
+#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
+#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
+#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
+#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
+#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
+#define PS_PWRUP_PROGRESS REG_BIT(17)
+#define PS_V_FILTER_BYPASS REG_BIT(8)
+#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
+#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
+#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
+#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
+#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
+#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
+#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
+#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
+#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
+#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
+#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
+#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
+#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
+#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
+#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
+
+#define _PS_PWR_GATE_1A 0x68160
+#define _PS_PWR_GATE_2A 0x68260
+#define _PS_PWR_GATE_1B 0x68960
+#define _PS_PWR_GATE_2B 0x68A60
+#define _PS_PWR_GATE_1C 0x69160
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
+#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
+#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
+#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
+#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
+#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
+#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
+#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
+#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
+#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
+#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
+
+#define _PS_WIN_POS_1A 0x68170
+#define _PS_WIN_POS_2A 0x68270
+#define _PS_WIN_POS_1B 0x68970
+#define _PS_WIN_POS_2B 0x68A70
+#define _PS_WIN_POS_1C 0x69170
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
+#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
+
+#define _PS_WIN_SZ_1A 0x68174
+#define _PS_WIN_SZ_2A 0x68274
+#define _PS_WIN_SZ_1B 0x68974
+#define _PS_WIN_SZ_2B 0x68A74
+#define _PS_WIN_SZ_1C 0x69174
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
+#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
+
+#define _PS_VSCALE_1A 0x68184
+#define _PS_VSCALE_2A 0x68284
+#define _PS_VSCALE_1B 0x68984
+#define _PS_VSCALE_2B 0x68A84
+#define _PS_VSCALE_1C 0x69184
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+
+#define _PS_HSCALE_1A 0x68190
+#define _PS_HSCALE_2A 0x68290
+#define _PS_HSCALE_1B 0x68990
+#define _PS_HSCALE_2B 0x68A90
+#define _PS_HSCALE_1C 0x69190
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+
+#define _PS_VPHASE_1A 0x68188
+#define _PS_VPHASE_2A 0x68288
+#define _PS_VPHASE_1B 0x68988
+#define _PS_VPHASE_2B 0x68A88
+#define _PS_VPHASE_1C 0x69188
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
+#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
+#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
+#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
+
+#define _PS_HPHASE_1A 0x68194
+#define _PS_HPHASE_2A 0x68294
+#define _PS_HPHASE_1B 0x68994
+#define _PS_HPHASE_2B 0x68A94
+#define _PS_HPHASE_1C 0x69194
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+
+#define _PS_ECC_STAT_1A 0x681D0
+#define _PS_ECC_STAT_2A 0x682D0
+#define _PS_ECC_STAT_1B 0x689D0
+#define _PS_ECC_STAT_2B 0x68AD0
+#define _PS_ECC_STAT_1C 0x691D0
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
+
+#define _PS_COEF_SET0_INDEX_1A 0x68198
+#define _PS_COEF_SET0_INDEX_2A 0x68298
+#define _PS_COEF_SET0_INDEX_1B 0x68998
+#define _PS_COEF_SET0_INDEX_2B 0x68A98
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
+#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
+
+#define _PS_COEF_SET0_DATA_1A 0x6819C
+#define _PS_COEF_SET0_DATA_2A 0x6829C
+#define _PS_COEF_SET0_DATA_1B 0x6899C
+#define _PS_COEF_SET0_DATA_2B 0x68A9C
+#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
+
+/* Ivybridge display engine (DE) interrupt bits */
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
+
+#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
+
+#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
+#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
+#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
+#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl-mtl */
+#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl */
+#define GEN12_PIPEDMC_FLIPQ_DONE REG_BIT(24) /* tgl-adl */
+#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
+#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
+#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
+#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
+#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
+#define MTL_PIPEDMC_FLIPQ_DONE REG_BIT(17) /* mtl */
+#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
+#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
+#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
+#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
+#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
+#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
+#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
+#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
+#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
+#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
+#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
+#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
+#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
+#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
+ REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
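+/*
+ * e.g. PLANE_1..PLANE_4 map to bits 3..6 and PLANE_5..PLANE_7 to
+ * bits 16..18, matching the per-plane FLIP_DONE bits above.
+ */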
+#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
+#define GEN8_PIPE_VSYNC REG_BIT(1)
+#define GEN8_PIPE_VBLANK REG_BIT(0)
+
+#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
+ GEN8_DE_PIPE_IER(pipe), \
+ GEN8_DE_PIPE_IIR(pipe))
+
+#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
+#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
+
+#define GEN8_DE_PORT_ISR _MMIO(0x44440)
+#define GEN8_DE_PORT_IMR _MMIO(0x44444)
+#define GEN8_DE_PORT_IIR _MMIO(0x44448)
+#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
+#define ICL_AUX_CHANNEL_E (1 << 29)
+#define ICL_AUX_CHANNEL_F (1 << 28)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
+#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
+#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
+#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
+#define BXT_DE_PORT_GMBUS (1 << 1)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
+#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
+#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
+#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
+#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
+#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
+#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
+#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
+#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
+#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
+#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
+
+#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
+ GEN8_DE_PORT_IER, \
+ GEN8_DE_PORT_IIR)
+
+#define GEN8_DE_MISC_ISR _MMIO(0x44460)
+#define GEN8_DE_MISC_IMR _MMIO(0x44464)
+#define GEN8_DE_MISC_IIR _MMIO(0x44468)
+#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
+#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
+#define GEN8_DE_MISC_GSE REG_BIT(27)
+#define GEN8_DE_EDP_PSR REG_BIT(19)
+#define XELPDP_PMDEMAND_RSP REG_BIT(3)
+#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
+
+#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
+ GEN8_DE_MISC_IER, \
+ GEN8_DE_MISC_IIR)
+
+#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
+#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
+#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
+#define GEN11_DE_PCH_IRQ (1 << 23)
+#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
+#define GEN11_DE_PORT_IRQ (1 << 20)
+#define GEN11_DE_PIPE_C (1 << 18)
+#define GEN11_DE_PIPE_B (1 << 17)
+#define GEN11_DE_PIPE_A (1 << 16)
+
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC1))
+#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
+
+#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
+ GEN11_DE_HPD_IER, \
+ GEN11_DE_HPD_IIR)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
+
+#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
+#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
+#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
+#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
+#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
+#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
+#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
+#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
+#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+
+#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
+ PICAINTERRUPT_IER, \
+ PICAINTERRUPT_IIR)
+
+#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
+#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
+#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
+#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
+#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
+#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
+#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+
+#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
+#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
+#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
+#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
+#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
+#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
+#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
+
+#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
+#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
+#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
+#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
+#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
+
+#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
+#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
+
+#define FUSE_STRAP _MMIO(0x42014)
+#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
+#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
+#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
+#define IVB_PIPE_C_DISABLE REG_BIT(28)
+#define ILK_HDCP_DISABLE REG_BIT(25)
+#define ILK_eDP_A_DISABLE REG_BIT(24)
+#define HSW_CDCLK_LIMIT REG_BIT(24)
+#define ILK_DESKTOP REG_BIT(23)
+#define HSW_CPU_SSC_ENABLE REG_BIT(21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT REG_BIT(1)
+
+#define CHICKEN_MISC_2 _MMIO(0x42084)
+#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
+#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
+#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
+#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
+#define GLK_CL2_PWR_DOWN REG_BIT(12)
+#define GLK_CL1_PWR_DOWN REG_BIT(11)
+#define GLK_CL0_PWR_DOWN REG_BIT(10)
+
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
+#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
+#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
+#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
+#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
+#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
+
+#define DISP_ARB_CTL2 _MMIO(0x45004)
+#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
+#define DISP_IPC_ENABLE REG_BIT(3)
+
+#define GEN7_MSG_CTL _MMIO(0x45010)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
+#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
+
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
+
+#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
+
+#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
+#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
+#define DCPR_MASK_LPMODE REG_BIT(26)
+#define DCPR_SEND_RESP_IMM REG_BIT(25)
+#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+
+#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
+#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
+
+#define SKL_DFSM _MMIO(0x51000)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+
+#define XE2LPD_DE_CAP _MMIO(0x41100)
+#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
+#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
+#define XE2LPD_DE_CAP_DSC_REMOVED 1
+#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
+#define XE2LPD_DE_CAP_SCALER_SINGLE 1
+
+#define SKL_DSSM _MMIO(0x51004)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
+#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+
+#define PCH_DISPLAY_BASE 0xc0000u
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+
+/* south display engine interrupt: CPT - CNP */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT | \
+ SDE_PORTA_HOTPLUG_SPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
+
+/* south display engine interrupt: ICP/TGP/MTP */
+#define SDE_PICAINTERRUPT REG_BIT(31)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
+#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
+#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
+#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
+#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+
+#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
+ SDEIER, \
+ SDEIIR)
+
+#define SERR_INT _MMIO(0xc4040)
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
+#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
+#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
+#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
+#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
+#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
+#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
+#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
+
+#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
+#define PORTE_HOTPLUG_ENABLE (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* This register is a reuse of the PCH_PORT_HOTPLUG register. The
+ * functionality covered in PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
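
A minimal usage sketch (illustrative only, not part of this patch; mmio_read()/mmio_write() are stand-in MMIO accessors): with the old PCH_PORT_HOTPLUG functionality split as described above, enabling hot-plug detection means programming the 4-bits-per-pin enable fields of the two registers separately, e.g. for DDI A and Type-C port 1:

	u32 val;

	/* Enable HPD for DDI A in the DDI half of the split register. */
	val = mmio_read(SHOTPLUG_CTL_DDI);
	mmio_write(SHOTPLUG_CTL_DDI, val | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A));

	/* Enable HPD for Type-C port 1 in the TC half. */
	val = mmio_read(SHOTPLUG_CTL_TC);
	mmio_write(SHOTPLUG_CTL_TC, val | ICP_TC_HPD_ENABLE(HPD_PORT_TC1));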
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+#define SHPD_FILTER_CNT_250 0x000F8
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define _PCH_FPB0 0xc6048
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define FP_CB_TUNE (0x3 << 22)
+
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB1 0xc604c
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST _MMIO(0xc606c)
+
+#define PCH_DREF_CONTROL _MMIO(0xC6200)
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3 << 12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3 << 10)
+#define RAWCLK_FREQ_MASK 0x3ff
+#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
+#define CNP_RAWCLK_DIV(div) ((div) << 16)
+#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
+#define ICP_RAWCLK_NUM(num) ((num) << 11)
+
+#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
+
+#define PCH_SSC4_PARMS _MMIO(0xc6210)
+#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
+
+#define PCH_DPLL_SEL _MMIO(0xc7000)
+#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
+
+/* transcoder */
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+
+#define _PCH_TRANSA_LINK_N2 0xe004c
+#define _PCH_TRANSB_LINK_N2 0xe104c
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+/* Per-transcoder DIP controls (PCH) */
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+
+#define _VIDEO_DIP_GCP_A 0xe0210
+#define _VIDEO_DIP_GCP_B 0xe1210
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
+
+/* Per-transcoder DIP controls (VLV) */
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+
+/* Haswell DIP controls */
+#define _HSW_VIDEO_DIP_CTL_A 0x60200
+#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+/* ADLP and later: */
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
+
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
+#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
+#define _HSW_VIDEO_DIP_GCP_B 0x61210
+#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+
+#define _HSW_STEREO_3D_CTL_A 0x70020
+#define _HSW_STEREO_3D_CTL_B 0x71020
+#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
+#define S3D_ENABLE (1 << 31)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
+#define TRANS_ENABLE REG_BIT(31)
+#define TRANS_STATE_ENABLE REG_BIT(30)
+#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
+#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
+#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
+#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
+#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
+#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
+#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
+#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
+#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
+#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+
+#define PCH_DP_B _MMIO(0xe4100)
+#define PCH_DP_C _MMIO(0xe4200)
+#define PCH_DP_D _MMIO(0xe4300)
+
+/* CPT */
+#define _TRANS_DP_CTL_A 0xe0300
+#define _TRANS_DP_CTL_B 0xe1300
+#define _TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
+#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
+#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
+#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
+#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
+#define TRANS_DP_ENH_FRAMING REG_BIT(18)
+#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
+#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
+#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
+#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
+#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
+
+#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
+
+/*
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
+ * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
+ */
+#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
+#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
+#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
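
As an illustration of the request/status scheme described in the comment above (a minimal sketch, not part of this patch; mmio_read()/mmio_write() are stand-ins for whatever MMIO accessors the surrounding driver provides, and pw_idx is one of the *_PW_CTL_IDX_* values below), enabling a power well from the DRIVER register set amounts to setting its request bit and waiting for the hardware to latch the matching state bit:

	u32 val;

	/* Request the power well via the DRIVER control register. */
	val = mmio_read(HSW_PWR_WELL_CTL2);
	mmio_write(HSW_PWR_WELL_CTL2, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* Hardware sets the state bit once the power well is actually up. */
	while (!(mmio_read(HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_CTL_STATE(pw_idx)))
		cpu_relax();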
+
+/* HSW/BDW power well */
+#define HSW_PW_CTL_IDX_GLOBAL 15
+
+/* SKL/BXT/GLK power wells */
+#define SKL_PW_CTL_IDX_PW_2 15
+#define SKL_PW_CTL_IDX_PW_1 14
+#define GLK_PW_CTL_IDX_AUX_C 10
+#define GLK_PW_CTL_IDX_AUX_B 9
+#define GLK_PW_CTL_IDX_AUX_A 8
+#define SKL_PW_CTL_IDX_DDI_D 4
+#define SKL_PW_CTL_IDX_DDI_C 3
+#define SKL_PW_CTL_IDX_DDI_B 2
+#define SKL_PW_CTL_IDX_DDI_A_E 1
+#define GLK_PW_CTL_IDX_DDI_A 1
+#define SKL_PW_CTL_IDX_MISC_IO 0
+
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
+#define ICL_PW_CTL_IDX_PW_4 3
+#define ICL_PW_CTL_IDX_PW_3 2
+#define ICL_PW_CTL_IDX_PW_2 1
+#define ICL_PW_CTL_IDX_PW_1 0
+
+/* XE_LPD - power wells */
+#define XELPD_PW_CTL_IDX_PW_D 8
+#define XELPD_PW_CTL_IDX_PW_C 7
+#define XELPD_PW_CTL_IDX_PW_B 6
+#define XELPD_PW_CTL_IDX_PW_A 5
+
+#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
+#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
+#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
+#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
+#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define XELPD_PW_CTL_IDX_AUX_E 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define XELPD_PW_CTL_IDX_AUX_D 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
+#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
+#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
+#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
+#define ICL_PW_CTL_IDX_AUX_C 2
+#define ICL_PW_CTL_IDX_AUX_B 1
+#define ICL_PW_CTL_IDX_AUX_A 0
+
+#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define XELPD_PW_CTL_IDX_DDI_E 8
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define XELPD_PW_CTL_IDX_DDI_D 7
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
+#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
+#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
+#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
+#define ICL_PW_CTL_IDX_DDI_C 2
+#define ICL_PW_CTL_IDX_DDI_B 1
+#define ICL_PW_CTL_IDX_DDI_A 0
+
+/* HSW - power well misc debug registers */
+#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
+#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+
+/* SKL Fuse Status */
+enum skl_power_gate {
+ SKL_PG0,
+ SKL_PG1,
+ SKL_PG2,
+ ICL_PG3,
+ ICL_PG4,
+};
+
+#define SKL_FUSE_STATUS _MMIO(0x42000)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
+#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
+
+/* Per-pipe DDI Function Control */
+#define _TRANS_DDI_FUNC_CTL_A 0x60400
+#define _TRANS_DDI_FUNC_CTL_B 0x61400
+#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
+#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
+#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
+
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_SHIFT 28
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
+#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
+
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
+
+#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
+#define DISABLE_DPT_CLK_GATING REG_BIT(1)
+
+/* DisplayPort Transport Control */
+#define _DP_TP_CTL_A 0x64040
+#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
+#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
+
+/* DisplayPort Transport Status */
+#define _DP_TP_STATUS_A 0x64044
+#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
+#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
+
+/* DDI Buffer Control */
+#define _DDI_BUF_CTL_A 0x64000
+#define _DDI_BUF_CTL_B 0x64100
+/* Known as DDI_CTL_DE in MTL+ */
+#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE REG_BIT(31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
+#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
+#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
+#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
+#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
+#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
+#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
+#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
+#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
+#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
+#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
+#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
+ (symbols))
+#define DDI_BUF_IS_IDLE REG_BIT(7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
+#define DDI_A_4_LANES REG_BIT(4)
+#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
+#define DDI_PORT_WIDTH_SHIFT 1
+#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
+
+/* DDI Buffer Translations */
+#define _DDI_BUF_TRANS_A 0x64E00
+#define _DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
+#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE _MMIO(0xC6020)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
+
+/* SPLL */
+#define SPLL_CTL _MMIO(0x46020)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
+
+/* WRPLL */
+#define _WRPLL_CTL1 0x46040
+#define _WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
+
+/* Port clock selection */
+#define _PORT_CLK_SEL_A 0x46100
+#define _PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
+
+/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
+#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
+
+/* Transcoder clock selection */
+#define _TRANS_CLK_SEL_A 0x46140
+#define _TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
+
+#define CDCLK_FREQ _MMIO(0x46200)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define _TRANSC_MSA_MISC 0x62410
+#define _TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
+/* See DP_MSA_MISC_* for the bit definitions */
+
+#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
+#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
+#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
+#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
+#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
+#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
+/* LCPLL Control */
+#define LCPLL_CTL _MMIO(0x130040)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
+
+/*
+ * SKL Clocks
+ */
+/* CDCLK_CTL */
+#define CDCLK_CTL _MMIO(0x46000)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL _MMIO(0x46010)
+#define LCPLL2_CTL _MMIO(0x46014)
+#define LCPLL_PLL_ENABLE (1 << 31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 _MMIO(0x6C058)
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
+
+/* DPLL control2 */
+#define DPLL_CTRL2 _MMIO(0x6C05C)
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
+
+/* DPLL Status */
+#define DPLL_STATUS _MMIO(0x6C060)
+#define DPLL_LOCK(id) (1 << ((id) * 8))
+
+/* DPLL cfg */
+#define _DPLL1_CFGCR1 0x6C040
+#define _DPLL2_CFGCR1 0x6C048
+#define _DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define _DPLL1_CFGCR2 0x6C044
+#define _DPLL2_CFGCR2 0x6C04C
+#define _DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
+#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+/* ICL Clocks */
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - TC_PORT_4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
+ (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
+ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
+/*
+ * DG1 Clocks
+ * The first register controls phys A and B, while the second register
+ * controls phys C and D. The bits in these registers are the same, but
+ * refer to different phys.
+ */
+#define _DG1_DPCLKA_CFGCR0 0x164280
+#define _DG1_DPCLKA1_CFGCR0 0x16C280
+#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
+#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
+#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
+ _DG1_DPCLKA_CFGCR0, \
+ _DG1_DPCLKA1_CFGCR0)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
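
As a worked expansion of the indexing described in the comment above (assuming the usual enum phy numbering where PHY_C == 2): DG1_DPCLKA_CFGCR0(PHY_C) resolves to _DG1_DPCLKA1_CFGCR0 (0x16C280) because 2 / 2 == 1, while DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(PHY_C) is REG_BIT(10) because _DG1_DPCLKA_PHY_IDX(2) == 0, i.e. the same bit position that PHY_A uses in _DG1_DPCLKA_CFGCR0.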
+
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
+
+/* ICL PLL */
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
+
+#define _DG2_PLL3_ENABLE 0x4601C
+
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
+
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
+#define _MG_PLL1_ENABLE 0x46030
+#define _MG_PLL2_ENABLE 0x46034
+#define _MG_PLL3_ENABLE 0x46038
+#define _MG_PLL4_ENABLE 0x4603C
+/* Bits are the same as _DPLL0_ENABLE */
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
+ _MG_PLL2_ENABLE)
+
+/* DG1 PLL */
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
+
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
+#define _ICL_DPLL0_CFGCR0 0x164000
+#define _ICL_DPLL1_CFGCR0 0x164080
+#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+ _ICL_DPLL1_CFGCR0)
+#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
+#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
+#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
+#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
+#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
+#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
+#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
+#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
+#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
+
+#define _ICL_DPLL0_CFGCR1 0x164004
+#define _ICL_DPLL1_CFGCR1 0x164084
+#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+ _ICL_DPLL1_CFGCR1)
+#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
+#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
+#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
+#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
+#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
+#define DPLL_CFGCR1_KDIV_1 (1 << 6)
+#define DPLL_CFGCR1_KDIV_2 (2 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
+#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
+#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR1_PDIV_2 (1 << 2)
+#define DPLL_CFGCR1_PDIV_3 (2 << 2)
+#define DPLL_CFGCR1_PDIV_5 (4 << 2)
+#define DPLL_CFGCR1_PDIV_7 (8 << 2)
+#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
+#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
+
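For reference (not part of the patch): the DCO frequency encoded by the CFGCR0 fields above is ref * (DCO_INTEGER + DCO_FRACTION / 2^15). A minimal decode sketch with the divider handling deliberately omitted; the complete version, including PDIV/QDIV/KDIV decoding from CFGCR1, is in intel_dpll_mgr.c:

/* Sketch only: DCO frequency in kHz from CFGCR0, given the reference in kHz. */
static unsigned int icl_dco_freq_khz_sketch(u32 cfgcr0, unsigned int ref_khz)
{
	u32 dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
	u32 dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* DCO = ref * (integer + fraction / 2^15); the fraction field is 15 bits wide. */
	return dco_integer * ref_khz + (dco_fraction * ref_khz) / 0x8000;
}
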
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
+#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0)
+
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
+#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1)
+
+#define _DG1_DPLL2_CFGCR0 0x16C284
+#define _DG1_DPLL3_CFGCR0 0x16C28C
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
+
+#define _DG1_DPLL2_CFGCR1 0x16C288
+#define _DG1_DPLL3_CFGCR1 0x16C290
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
+
+/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
+
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
+
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL _MMIO(0x6d000)
+#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
+#define BXT_DE_PLL_RATIO_MASK 0xff
+
+#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
+#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
+#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
+#define ICL_CDCLK_PLL_RATIO(x) (x)
+#define ICL_CDCLK_PLL_RATIO_MASK 0xff
+
+/* GEN9 DC */
+#define DC_STATE_EN _MMIO(0x45504)
+#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
+
+#define D_COMP_BDW _MMIO(0x138144)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP _MMIO(0xc2014)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
+
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
+#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
+ ICL_PHY_MISC(port))
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
+
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
+
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
+
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
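For reference (not part of the patch): a minimal sketch of reading the Type-C pin assignment and readiness out of TCSS_DDI_STATUS, assuming the intel_de_read() accessor and REG_FIELD_GET(); the real consumers are in intel_tc.c:

/* Sketch only: report PHY readiness and the negotiated pin assignment. */
static bool tc_phy_status_sketch(struct intel_display *display, int tc,
				 u32 *pin_assignment)
{
	u32 val = intel_de_read(display, TCSS_DDI_STATUS(tc));

	/* Field values correspond to enum intel_tc_pin_assignment. */
	*pin_assignment = REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	return val & TCSS_DDI_STATUS_READY;
}
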
+#define CLKREQ_POLICY _MMIO(0x101038)
+#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
+#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
+#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
+#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
+#endif /* __INTEL_DISPLAY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 093b386c95e8..03e8c68d2913 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -4,54 +4,48 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "intel_clock_gating.h"
+#include "intel_cx0_phy.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_pps.h"
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+bool intel_display_reset_test(struct intel_display *display)
{
- return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(to_gt(dev_priv)));
+ return display->params.force_reset_modeset_test;
}
-void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
+/* returns true if intel_display_reset_finish() needs to be called */
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context)
{
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(dev_priv))
- return;
+ if (!HAS_DISPLAY(display))
+ return false;
- /* reset doesn't touch the display */
- if (!dev_priv->display.params.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
- /* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
- smp_mb__after_atomic();
- wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
-
- if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (atomic_read(&display->restore.pending_fb_pin)) {
+ drm_dbg_kms(display->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(to_gt(dev_priv));
+ modeset_stuck(context);
}
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
drm_modeset_acquire_init(ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, ctx);
if (ret != -EDEADLK)
break;
@@ -61,38 +55,36 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
+ state = drm_atomic_helper_duplicate_state(display->drm, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ drm_err(display->drm, "Duplicating state failed with %i\n",
ret);
- return;
+ return true;
}
- ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
+ ret = drm_atomic_helper_disable_all(display->drm, ctx);
if (ret) {
- drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
drm_atomic_state_put(state);
- return;
+ return true;
}
- dev_priv->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
state->acquire_ctx = ctx;
+
+ return true;
}
-void intel_display_reset_finish(struct drm_i915_private *i915)
+void intel_display_reset_finish(struct intel_display *display, bool test_only)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
- return;
-
- /* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
+ if (!HAS_DISPLAY(display))
return;
state = fetch_and_zero(&display->restore.modeset_state);
@@ -100,12 +92,12 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
goto unlock;
/* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(i915)) {
+ if (test_only) {
/* for testing only restore the display */
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret) {
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
}
} else {
@@ -116,21 +108,20 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
- intel_hpd_init(i915);
+ intel_cx0_pll_power_save_wa(display);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
- mutex_unlock(&i915->drm.mode_config.mutex);
-
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.h b/drivers/gpu/drm/i915/display/intel_display_reset.h
index f06d0d35b86b..8b3bda134454 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.h
@@ -6,9 +6,15 @@
#ifndef __INTEL_RESET_H__
#define __INTEL_RESET_H__
-struct drm_i915_private;
+#include <linux/types.h>
-void intel_display_reset_prepare(struct drm_i915_private *i915);
-void intel_display_reset_finish(struct drm_i915_private *i915);
+struct intel_display;
+
+typedef void modeset_stuck_fn(void *context);
+
+bool intel_display_reset_test(struct intel_display *display);
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context);
+void intel_display_reset_finish(struct intel_display *display, bool test_only);
#endif /* __INTEL_RESET_H__ */
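
For reference (not part of the patch): a caller-side sketch of the reworked prepare/finish interface. The wedging callback, the intel_gt context and the reset_clobbers_display flag are assumptions for illustration only; the real caller is the i915 GT reset path:

/* Sketch only: unblock a modeset that is deadlocked against the pending reset. */
static void example_modeset_stuck(void *context)
{
	struct intel_gt *gt = context;

	intel_gt_set_wedged(gt);
}

static void example_gpu_reset(struct intel_display *display, struct intel_gt *gt,
			      bool reset_clobbers_display)
{
	bool need_finish = false;

	/* Prepare the display only if the reset clobbers it, or testing is forced. */
	if (reset_clobbers_display || intel_display_reset_test(display))
		need_finish = intel_display_reset_prepare(display,
							  example_modeset_stuck, gt);

	/* ... perform the actual GPU reset here ... */

	if (need_finish)
		intel_display_reset_finish(display, !reset_clobbers_display);
}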
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..0a331f89b4db
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/intel/display_parent_interface.h>
+
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return display->parent->rpm->get_raw(display->drm);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ display->parent->rpm->put_raw(display->drm, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return display->parent->rpm->get(display->drm);
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return display->parent->rpm->get_if_in_use(display->drm);
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return display->parent->rpm->get_noresume(display->drm);
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ display->parent->rpm->put(display->drm, wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ display->parent->rpm->put_unchecked(display->drm);
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return display->parent->rpm->suspended(display->drm);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ display->parent->rpm->assert_held(display->drm);
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ display->parent->rpm->assert_block(display->drm);
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ display->parent->rpm->assert_unblock(display->drm);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
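
For reference (not part of the patch): with_intel_display_rpm() scopes a runtime PM wakeref to the following block, pairing intel_display_rpm_get() and intel_display_rpm_put() automatically. A minimal usage sketch; the register read inside the block is a placeholder and assumes the intel_de_read() accessor:

static void example_rpm_scoped_read(struct intel_display *display)
{
	/* The device is held awake for the duration of the block. */
	with_intel_display_rpm(display) {
		u32 val = intel_de_read(display, DC_STATE_EN);

		drm_dbg_kms(display->drm, "DC_STATE_EN=0x%08x\n", val);
	}
}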
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 918d0327169a..82ea1ec482e4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,6 +8,9 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_core.h"
+#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -43,12 +46,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
return;